summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c21
-rw-r--r--drivers/accel/qaic/qaic_ras.c4
-rw-r--r--drivers/bluetooth/btintel_pcie.c13
-rw-r--r--drivers/bluetooth/btintel_pcie.h2
-rw-r--r--drivers/bluetooth/btmtk.c15
-rw-r--r--drivers/bluetooth/hci_ath.c3
-rw-r--r--drivers/bluetooth/hci_bcsp.c3
-rw-r--r--drivers/bluetooth/hci_h4.c3
-rw-r--r--drivers/bluetooth/hci_h5.c3
-rw-r--r--drivers/bluetooth/virtio_bt.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c33
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c13
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem.c25
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c8
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c18
-rw-r--r--drivers/gpu/drm/panel/Kconfig1
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c2
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c4
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c2
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83121a.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c6
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c9
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c8
-rw-r--r--drivers/gpu/drm/tiny/bochs.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c18
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c12
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c8
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c5
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c11
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.c7
-rw-r--r--drivers/iommu/amd/amd_iommu.h3
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h21
-rw-r--r--drivers/iommu/amd/init.c123
-rw-r--r--drivers/iommu/amd/iommu.c5
-rw-r--r--drivers/iommu/amd/ppr.c8
-rw-r--r--drivers/iommu/iommu-pages.h2
-rw-r--r--drivers/net/dsa/mt7530.c75
-rw-r--r--drivers/net/dsa/mt7530.h8
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c10
-rw-r--r--drivers/net/ethernet/cortina/gemini.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c374
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c231
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c3
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c40
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c10
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c5
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-nuvoton.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c7
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.c4
-rw-r--r--drivers/net/netdevsim/netdev.c3
-rw-r--r--drivers/net/netdevsim/netdevsim.h4
-rw-r--r--drivers/net/netdevsim/psp.c65
-rw-r--r--drivers/net/ovpn/io.c7
-rw-r--r--drivers/net/phy/bcm-phy-lib.c9
-rw-r--r--drivers/net/phy/bcm-phy-lib.h1
-rw-r--r--drivers/net/phy/bcm7xxx.c14
-rw-r--r--drivers/net/phy/broadcom.c5
-rw-r--r--drivers/net/phy/micrel.c15
-rw-r--r--drivers/net/usb/asix_devices.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/usb/r8152.c1
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c77
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c105
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c3
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c3
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h5
-rw-r--r--drivers/net/wireless/st/cw1200/pm.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c20
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c18
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h2
-rw-r--r--drivers/platform/wmi/core.c3
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c2
-rw-r--r--drivers/platform/x86/lenovo/wmi-other.c2
-rw-r--r--drivers/pmdomain/core.c10
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c10
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/spi/spi-ch341.c2
-rw-r--r--drivers/spi/spi-imx.c7
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c99
123 files changed, 1664 insertions, 737 deletions
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 2801378e3e19..3b7b008bccfe 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -537,6 +537,26 @@ static const struct file_operations ivpu_fops = {
#endif
};
+static int ivpu_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv,
+ u32 handle, u32 flags, int *prime_fd)
+{
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (drm_gem_is_imported(obj)) {
+ /* Do not allow re-exporting */
+ drm_gem_object_put(obj);
+ return -EOPNOTSUPP;
+ }
+
+ drm_gem_object_put(obj);
+
+ return drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+}
+
static const struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
@@ -545,6 +565,7 @@ static const struct drm_driver driver = {
.gem_create_object = ivpu_gem_create_object,
.gem_prime_import = ivpu_gem_prime_import,
+ .prime_handle_to_fd = ivpu_gem_prime_handle_to_fd,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
index cc0b75461e1a..6791af366cba 100644
--- a/drivers/accel/qaic/qaic_ras.c
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -497,11 +497,11 @@ static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg)
qdev->ce_count++;
break;
case UE:
- if (qdev->ce_count != UINT_MAX)
+ if (qdev->ue_count != UINT_MAX)
qdev->ue_count++;
break;
case UE_NF:
- if (qdev->ce_count != UINT_MAX)
+ if (qdev->ue_nf_count != UINT_MAX)
qdev->ue_nf_count++;
break;
default:
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 2f59c0d6f9ec..a3643e67b33f 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -289,6 +289,9 @@ static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
skb_put_data(skb, buf, strlen(buf));
data->boot_stage_cache = reg;
+ if (reg & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_WARNING)
+ bt_dev_warn(hdev, "Controller device warning (boot_stage: 0x%8.8x)", reg);
+
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
skb_put_data(skb, buf, strlen(buf));
@@ -880,8 +883,11 @@ static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
{
- return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
- (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
+ if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_WARNING)
+ bt_dev_warn(data->hdev, "Controller device warning (boot_stage: 0x%8.8x)",
+ data->boot_stage_cache);
+
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER;
}
static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
@@ -914,7 +920,8 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
data->img_resp_cache = reg;
if (btintel_pcie_in_error(data)) {
- bt_dev_err(data->hdev, "Controller in error state");
+ bt_dev_err(data->hdev, "Controller in error state (boot_stage: 0x%8.8x)",
+ data->boot_stage_cache);
btintel_pcie_dump_debug_registers(data->hdev);
return;
}
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 3c7bb708362d..f922abd1e7d8 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -48,7 +48,7 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
-#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR (BIT(12))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_WARNING (BIT(12))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER (BIT(13))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED (BIT(14))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 6fb6ca274808..f70c1b0f8990 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -695,8 +695,13 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
if (data->evt_skb == NULL)
goto err_free_wc;
- /* Parse and handle the return WMT event */
- wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ wmt_evt = skb_pull_data(data->evt_skb, sizeof(*wmt_evt));
+ if (!wmt_evt) {
+ bt_dev_err(hdev, "WMT event too short (%u bytes)",
+ data->evt_skb->len);
+ err = -EINVAL;
+ goto err_free_skb;
+ }
if (wmt_evt->whdr.op != hdr->op) {
bt_dev_err(hdev, "Wrong op received %d expected %d",
wmt_evt->whdr.op, hdr->op);
@@ -712,6 +717,12 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
status = BTMTK_WMT_PATCH_DONE;
break;
case BTMTK_WMT_FUNC_CTRL:
+ if (!skb_pull_data(data->evt_skb,
+ sizeof(wmt_evt_funcc->status))) {
+ err = -EINVAL;
+ goto err_free_skb;
+ }
+
wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
status = BTMTK_WMT_ON_DONE;
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index fa679ad0acdf..8201fa7f61e8 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -191,6 +191,9 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
+ if (!ath)
+ return -ENODEV;
+
ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index b386f91d8b46..db56eead27ce 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -585,6 +585,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
+ if (!bcsp)
+ return -ENODEV;
+
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index a889a66a326f..767372707498 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -109,6 +109,9 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
{
struct h4_struct *h4 = hu->priv;
+ if (!h4)
+ return -ENODEV;
+
h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index cfdf75dc2847..d35383718212 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -587,6 +587,9 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
struct h5 *h5 = hu->priv;
const unsigned char *ptr = data;
+ if (!h5)
+ return -ENODEV;
+
BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
count);
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 76d61af8a275..140ab55c9fc5 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -12,6 +12,7 @@
#include <net/bluetooth/hci_core.h>
#define VERSION "0.1"
+#define VIRTBT_RX_BUF_SIZE 1000
enum {
VIRTBT_VQ_TX,
@@ -33,11 +34,11 @@ static int virtbt_add_inbuf(struct virtio_bluetooth *vbt)
struct sk_buff *skb;
int err;
- skb = alloc_skb(1000, GFP_KERNEL);
+ skb = alloc_skb(VIRTBT_RX_BUF_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
- sg_init_one(sg, skb->data, 1000);
+ sg_init_one(sg, skb->data, VIRTBT_RX_BUF_SIZE);
err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
if (err < 0) {
@@ -197,6 +198,7 @@ static int virtbt_shutdown_generic(struct hci_dev *hdev)
static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb)
{
+ size_t min_hdr;
__u8 pkt_type;
pkt_type = *((__u8 *) skb->data);
@@ -204,16 +206,32 @@ static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb)
switch (pkt_type) {
case HCI_EVENT_PKT:
+ min_hdr = sizeof(struct hci_event_hdr);
+ break;
case HCI_ACLDATA_PKT:
+ min_hdr = sizeof(struct hci_acl_hdr);
+ break;
case HCI_SCODATA_PKT:
+ min_hdr = sizeof(struct hci_sco_hdr);
+ break;
case HCI_ISODATA_PKT:
- hci_skb_pkt_type(skb) = pkt_type;
- hci_recv_frame(vbt->hdev, skb);
+ min_hdr = sizeof(struct hci_iso_hdr);
break;
default:
kfree_skb(skb);
- break;
+ return;
+ }
+
+ if (skb->len < min_hdr) {
+ bt_dev_err_ratelimited(vbt->hdev,
+ "rx pkt_type 0x%02x payload %u < hdr %zu\n",
+ pkt_type, skb->len, min_hdr);
+ kfree_skb(skb);
+ return;
}
+
+ hci_skb_pkt_type(skb) = pkt_type;
+ hci_recv_frame(vbt->hdev, skb);
}
static void virtbt_rx_work(struct work_struct *work)
@@ -227,8 +245,15 @@ static void virtbt_rx_work(struct work_struct *work)
if (!skb)
return;
- skb_put(skb, len);
- virtbt_rx_handle(vbt, skb);
+ if (!len || len > VIRTBT_RX_BUF_SIZE) {
+ bt_dev_err_ratelimited(vbt->hdev,
+ "rx reply len %u outside [1, %u]\n",
+ len, VIRTBT_RX_BUF_SIZE);
+ kfree_skb(skb);
+ } else {
+ skb_put(skb, len);
+ virtbt_rx_handle(vbt, skb);
+ }
if (virtbt_add_inbuf(vbt) < 0)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 46aae3fad4bf..60debd543e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -3149,11 +3149,7 @@ static int __init amdgpu_init(void)
r = amdgpu_sync_init();
if (r)
- goto error_sync;
-
- r = amdgpu_userq_fence_slab_init();
- if (r)
- goto error_fence;
+ return r;
amdgpu_register_atpx_handler();
amdgpu_acpi_detect();
@@ -3161,7 +3157,7 @@ static int __init amdgpu_init(void)
/* Ignore KFD init failures when CONFIG_HSA_AMD is not set. */
r = amdgpu_amdkfd_init();
if (r && r != -ENOENT)
- goto error_fence;
+ goto error_fini_sync;
if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) {
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
@@ -3172,10 +3168,8 @@ static int __init amdgpu_init(void)
/* let modprobe override vga console setting */
return pci_register_driver(&amdgpu_kms_pci_driver);
-error_fence:
+error_fini_sync:
amdgpu_sync_fini();
-
-error_sync:
return r;
}
@@ -3186,7 +3180,6 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
- amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index bc772ca3dab7..b6f849d51c2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -262,12 +262,19 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
*/
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
+ int r;
+
if (adev->gart.bo != NULL)
return 0;
- return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
- NULL, (void *)&adev->gart.ptr);
+ r = amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
+ NULL, (void *)&adev->gart.ptr);
+ if (r)
+ return r;
+
+ memset_io(adev->gart.ptr, adev->gart.gart_pte_flags, adev->gart.table_size);
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index da39ac862f37..e2d5f04296e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -32,29 +32,9 @@
#include "amdgpu.h"
#include "amdgpu_userq_fence.h"
-static const struct dma_fence_ops amdgpu_userq_fence_ops;
-static struct kmem_cache *amdgpu_userq_fence_slab;
-
#define AMDGPU_USERQ_MAX_HANDLES (1U << 16)
-int amdgpu_userq_fence_slab_init(void)
-{
- amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
- sizeof(struct amdgpu_userq_fence),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!amdgpu_userq_fence_slab)
- return -ENOMEM;
-
- return 0;
-}
-
-void amdgpu_userq_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_userq_fence_slab);
-}
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
@@ -231,7 +211,7 @@ void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
- *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ *userq_fence = kmalloc(sizeof(**userq_fence), GFP_KERNEL);
return *userq_fence ? 0 : -ENOMEM;
}
@@ -342,7 +322,7 @@ static void amdgpu_userq_fence_free(struct rcu_head *rcu)
amdgpu_userq_fence_driver_put(fence_drv);
kvfree(userq_fence->fence_drv_array);
- kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ kfree(userq_fence);
}
static void amdgpu_userq_fence_release(struct dma_fence *f)
@@ -545,7 +525,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
if (r) {
mutex_unlock(&userq_mgr->userq_mutex);
- kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ kfree(userq_fence);
goto put_gobj_write;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
index d56246ad8c26..d355a0eecc07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -58,9 +58,6 @@ struct amdgpu_userq_fence_driver {
char timeline_name[TASK_COMM_LEN];
};
-int amdgpu_userq_fence_slab_init(void);
-void amdgpu_userq_fence_slab_fini(void);
-
void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 95be105671ec..86c7c2a429b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -5660,9 +5660,6 @@ static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
{
struct amdgpu_device *adev = ring->adev;
- /* we only allocate 32bit for each seq wb address */
- BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
-
/* write fence seq to the "addr" */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index 2fc39a6938f6..5b4121ddc78c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -31,89 +31,68 @@
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
static int
-mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
-{
- int ret;
-
- ret = amdgpu_bo_reserve(bo, true);
- if (ret) {
- DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
- goto err_reserve_bo_failed;
- }
-
- ret = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (ret) {
- DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
- goto err_map_bo_gart_failed;
- }
-
- amdgpu_bo_unreserve(bo);
- bo = amdgpu_bo_ref(bo);
-
- return 0;
-
-err_map_bo_gart_failed:
- amdgpu_bo_unreserve(bo);
-err_reserve_bo_failed:
- return ret;
-}
-
-static int
mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue,
uint64_t wptr)
{
struct amdgpu_bo_va_mapping *wptr_mapping;
- struct amdgpu_vm *wptr_vm;
struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+ struct amdgpu_bo *obj;
+ struct amdgpu_vm *vm = queue->vm;
+ struct drm_exec exec;
int ret;
- wptr_vm = queue->vm;
- ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
- if (ret)
- return ret;
-
wptr &= AMDGPU_GMC_HOLE_MASK;
- wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
- amdgpu_bo_unreserve(wptr_vm->root.bo);
- if (!wptr_mapping) {
- DRM_ERROR("Failed to lookup wptr bo\n");
- return -EINVAL;
- }
- wptr_obj->obj = wptr_mapping->bo_va->base.bo;
- if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
- DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
- return -EINVAL;
- }
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 2);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto fail_lock;
+
+ wptr_mapping = amdgpu_vm_bo_lookup_mapping(vm, wptr >> PAGE_SHIFT);
+ if (!wptr_mapping) {
+ ret = -EINVAL;
+ goto fail_lock;
+ }
- ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
- if (ret) {
- DRM_ERROR("Failed to map wptr bo to GART\n");
- return ret;
+ obj = wptr_mapping->bo_va->base.bo;
+ ret = drm_exec_lock_obj(&exec, &obj->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto fail_lock;
}
- ret = amdgpu_bo_reserve(wptr_obj->obj, true);
- if (ret) {
- DRM_ERROR("Failed to reserve wptr bo\n");
- return ret;
+ wptr_obj->obj = amdgpu_bo_ref(wptr_mapping->bo_va->base.bo);
+ if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+ ret = -EINVAL;
+ goto fail_map;
}
/* TODO use eviction fence instead of pinning. */
ret = amdgpu_bo_pin(wptr_obj->obj, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
- drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin wptr bo\n");
- goto unresv_bo;
+ DRM_ERROR("Failed to pin wptr bo. ret %d\n", ret);
+ goto fail_map;
+ }
+
+ ret = amdgpu_ttm_alloc_gart(&wptr_obj->obj->tbo);
+ if (ret) {
+ DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+ goto fail_map;
}
queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset(wptr_obj->obj);
- amdgpu_bo_unreserve(wptr_obj->obj);
+ drm_exec_fini(&exec);
return 0;
-unresv_bo:
- amdgpu_bo_unreserve(wptr_obj->obj);
+fail_map:
+ amdgpu_bo_unref(&wptr_obj->obj);
+fail_lock:
+ drm_exec_fini(&exec);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 44f0f23e1148..e64f2f6df9a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -889,7 +889,7 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
/* zero in first two bits */
- BUG_ON(addr & 0x3);
+ WARN_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
@@ -899,7 +899,7 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
/* zero in first two bits */
- BUG_ON(addr & 0x3);
+ WARN_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f829d65a79b4..f95bf6d95534 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1360,7 +1360,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (WARN_ON_ONCE(!peer_pdd))
continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
+ kfd_flush_tlb(peer_pdd);
}
kfree(devices_arr);
@@ -1455,7 +1455,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
if (WARN_ON_ONCE(!peer_pdd))
continue;
if (flush_tlb)
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
+ kfd_flush_tlb(peer_pdd);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 8ff97bf7d95a..b7f8f7ff8198 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1737,37 +1737,6 @@ bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entr
return false;
}
-/* check if there is kfd process still uses adev */
-static bool kgd2kfd_check_device_idle(struct amdgpu_device *adev)
-{
- struct kfd_process *p;
- struct hlist_node *p_temp;
- unsigned int temp;
- struct kfd_node *dev;
-
- mutex_lock(&kfd_processes_mutex);
-
- if (hash_empty(kfd_processes_table)) {
- mutex_unlock(&kfd_processes_mutex);
- return true;
- }
-
- /* check if there is device still use adev */
- hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
- for (int i = 0; i < p->n_pdds; i++) {
- dev = p->pdds[i]->dev;
- if (dev->adev == adev) {
- mutex_unlock(&kfd_processes_mutex);
- return false;
- }
- }
- }
-
- mutex_unlock(&kfd_processes_mutex);
-
- return true;
-}
-
/** kgd2kfd_teardown_processes - gracefully tear down existing
* kfd processes that use adev
*
@@ -1800,7 +1769,7 @@ void kgd2kfd_teardown_processes(struct amdgpu_device *adev)
mutex_unlock(&kfd_processes_mutex);
/* wait all kfd processes use adev terminate */
- while (!kgd2kfd_check_device_idle(adev))
+ while (!!atomic_read(&adev->kfd.dev->kfd_processes_count))
cond_resched();
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ab3b2e7be9bd..9185ebe4c079 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -572,7 +572,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
qpd->vmid,
qpd->page_table_base);
/* invalidate the VM context after pasid and vmid mapping is set up */
- kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
+ kfd_flush_tlb(qpd_to_pdd(qpd));
if (dqm->dev->kfd2kgd->set_scratch_backing_va)
dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
@@ -610,7 +610,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
if (flush_texture_cache_nocpsch(q->device, qpd))
dev_err(dev, "Failed to flush TC\n");
- kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
+ kfd_flush_tlb(qpd_to_pdd(qpd));
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
@@ -1284,7 +1284,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
- kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
+ kfd_flush_tlb(pdd);
}
/* Take a safe reference to the mm_struct, which may otherwise
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 163d665a6074..7b5b12206919 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1554,13 +1554,13 @@ void kfd_signal_reset_event(struct kfd_node *dev);
void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
void kfd_signal_process_terminate_event(struct kfd_process *p);
-static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
- enum TLB_FLUSH_TYPE type)
+static inline void kfd_flush_tlb(struct kfd_process_device *pdd)
{
struct amdgpu_device *adev = pdd->dev->adev;
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
- amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
+ amdgpu_vm_flush_compute_tlb(adev, vm, TLB_FLUSH_HEAVYWEIGHT,
+ pdd->dev->xcc_mask);
}
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 38085a0a0f58..35ec67d9739b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1424,7 +1424,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
if (r)
break;
}
- kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
+ kfd_flush_tlb(pdd);
}
return r;
@@ -1571,7 +1571,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
}
}
- kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
+ kfd_flush_tlb(pdd);
}
return r;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 731355bdb9bc..3650e7beeb67 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -1333,12 +1333,13 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
dev_id = adev->pdev->device;
- if ((dpm_table->mclk_table.count >= 2)
- && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
- smu_data->smc_state_table.MemoryLevel[1].MinVddci =
- smu_data->smc_state_table.MemoryLevel[0].MinVddci;
- smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
- smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
+ if ((dpm_table->mclk_table.count >= 2) &&
+ ((dev_id == 0x67B0) || (dev_id == 0x67B1)) &&
+ (adev->pdev->revision == 0)) {
+ smu_data->smc_state_table.MemoryLevel[1].MinVddc =
+ smu_data->smc_state_table.MemoryLevel[0].MinVddc;
+ smu_data->smc_state_table.MemoryLevel[1].MinVddcPhases =
+ smu_data->smc_state_table.MemoryLevel[0].MinVddcPhases;
}
smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index d9b388165de1..6c427bc75896 100644
--- a/drivers/gpu/drm/bridge/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -1293,7 +1293,7 @@ static const struct drm_edid *tda998x_edid_read(struct tda998x_priv *priv,
* can't handle signals gracefully.
*/
if (tda998x_edid_delay_wait(priv))
- return 0;
+ return NULL;
if (priv->rev == TDA19988)
reg_clear(priv, REG_TX4, TX4_PD_RAM);
@@ -1762,7 +1762,7 @@ static const struct drm_bridge_funcs tda998x_bridge_funcs = {
static int tda998x_get_audio_ports(struct tda998x_priv *priv,
struct device_node *np)
{
- const u32 *port_data;
+ const __be32 *port_data;
u32 size;
int i;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a80a335f4148..1541fc8a9ac2 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -490,7 +490,7 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
* the number of horizontal pixels that need an update.
*/
off_t bit_off = (off % line_length) * 8;
- off_t bit_end = (end % line_length) * 8;
+ off_t bit_end = bit_off + len * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d6424267260b..51a887cc7fd7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1019,7 +1019,7 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_change_handle *args = data;
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj, *idrobj;
int handle, ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
@@ -1042,8 +1042,29 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
mutex_lock(&file_priv->prime.lock);
spin_lock(&file_priv->table_lock);
+
+ /* When create_tail allocs an obj idr, it needs to first alloc as NULL,
+ * then later replace with the correct object. This is not necessary
+ * here, because the only operations that could race are drm_prime
+ * bookkeeping, and we hold the prime lock.
+ */
ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1,
GFP_NOWAIT);
+
+ if (ret < 0) {
+ spin_unlock(&file_priv->table_lock);
+ goto out_unlock;
+ }
+
+ idrobj = idr_replace(&file_priv->object_idr, NULL, handle);
+ if (idrobj != obj) {
+ idr_replace(&file_priv->object_idr, idrobj, handle);
+ idr_remove(&file_priv->object_idr, args->new_handle);
+ spin_unlock(&file_priv->table_lock);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
spin_unlock(&file_priv->table_lock);
if (ret < 0)
@@ -1055,6 +1076,8 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
if (ret < 0) {
spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, handle);
+ idrobj = idr_replace(&file_priv->object_idr, obj, handle);
+ WARN_ON(idrobj != NULL);
spin_unlock(&file_priv->table_lock);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index df4232d7e135..3cc50d697c89 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -116,16 +116,18 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
*/
mutex_lock(&gpu->sched_lock);
+ ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
+ NULL, xa_limit_32b, &gpu->next_user_fence,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto out_unlock;
+
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
- submit->out_fence, xa_limit_32b,
- &gpu->next_user_fence, GFP_KERNEL);
- if (ret < 0) {
- drm_sched_job_cleanup(&submit->sched_job);
- goto out_unlock;
- }
+
+ xa_store(&gpu->user_fences, submit->out_fence_id,
+ submit->out_fence, GFP_KERNEL);
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 29a8366513fa..e68c954ec3e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -423,7 +423,9 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->bridge.of_node = dev->of_node;
- drm_bridge_add(&mic->bridge);
+ ret = devm_drm_bridge_add(dev, &mic->bridge);
+ if (ret)
+ goto err;
pm_runtime_enable(dev);
@@ -443,12 +445,8 @@ err:
static void exynos_mic_remove(struct platform_device *pdev)
{
- struct exynos_mic *mic = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
-
- drm_bridge_remove(&mic->bridge);
}
static const struct of_device_id exynos_mic_of_match[] = {
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 385a634c3ed0..d9be7a5a239c 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -750,9 +750,8 @@ static bool has_auxccs(struct drm_device *drm)
{
struct drm_i915_private *i915 = to_i915(drm);
- return IS_GRAPHICS_VER(i915, 9, 12) ||
- IS_ALDERLAKE_P(i915) ||
- IS_METEORLAKE(i915);
+ return IS_GRAPHICS_VER(i915, 9, 12) &&
+ !HAS_FLAT_CCS(i915);
}
static bool has_fenced_regions(struct drm_device *drm)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 72848ed80df7..b101e14f841e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2513,6 +2513,7 @@ static const struct nvkm_device_chip
nv170_chipset = {
.name = "GA100",
.bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga100_fb_new },
@@ -2529,7 +2530,6 @@ nv170_chipset = {
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
- .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
@@ -3341,7 +3341,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
- case 0x170: device->chip = &nv170_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
@@ -3361,6 +3360,14 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x1b6: device->chip = &nv1b6_chipset; break;
case 0x1b7: device->chip = &nv1b7_chipset; break;
default:
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
+
if (!device->chip) {
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index fdd820eeef81..27a13aeccd3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -41,11 +41,15 @@ ga100_gsp_flcn = {
static const struct nvkm_gsp_func
ga100_gsp = {
.flcn = &ga100_gsp_flcn,
+ .fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
.booter.ctor = tu102_gsp_booter_ctor,
+ .fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
+ .fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
+
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index dd82c76b8b9a..19cb269e7a26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -318,13 +318,8 @@ tu102_gsp_oneinit(struct nvkm_gsp *gsp)
if (ret)
return ret;
- /*
- * Calculate FB layout. FRTS is a memory region created by the FWSEC-FRTS firmware.
- * FWSEC comes from VBIOS. So on systems with no VBIOS (e.g. GA100), the FRTS does
- * not exist. Therefore, use the existence of VBIOS to determine whether to reserve
- * an FRTS region.
- */
- gsp->fb.wpr2.frts.size = device->bios ? 0x100000 : 0;
+ /* Calculate FB layout. */
+ gsp->fb.wpr2.frts.size = 0x100000;
gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
@@ -348,12 +343,9 @@ tu102_gsp_oneinit(struct nvkm_gsp *gsp)
if (ret)
return ret;
- /* Only boot FWSEC-FRTS if it actually exists */
- if (gsp->fb.wpr2.frts.size) {
- ret = nvkm_gsp_fwsec_frts(gsp);
- if (WARN_ON(ret))
- return ret;
- }
+ ret = nvkm_gsp_fwsec_frts(gsp);
+ if (WARN_ON(ret))
+ return ret;
/* Reset GSP into RISC-V mode. */
ret = gsp->func->reset(gsp);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d6863b28ddc5..d592f4f4b939 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -208,6 +208,7 @@ config DRM_PANEL_HIMAX_HX83121A
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DSC_HELPER
select DRM_KMS_HELPER
help
Say Y here if you want to enable support for Himax HX83121A-based
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index d5fe105bdbdd..658ce64c71eb 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1324,6 +1324,8 @@ static int boe_panel_disable(struct drm_panel *panel)
mipi_dsi_dcs_set_display_off_multi(&ctx);
mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ boe->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
mipi_dsi_msleep(&ctx, 150);
return ctx.accum_err;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 4f8d6d8c07e4..dbdb7e3cb7b6 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -98,9 +98,7 @@ static int feiyang_enable(struct drm_panel *panel)
/* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
msleep(200);
- mipi_dsi_dcs_set_display_on(ctx->dsi);
-
- return 0;
+ return mipi_dsi_dcs_set_display_on(ctx->dsi);
}
static int feiyang_disable(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 8b2a68ee851e..a5e5c9ea7a73 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -937,6 +937,8 @@ static int hx83102_disable(struct drm_panel *panel)
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
mipi_dsi_msleep(&dsi_ctx, 150);
return dsi_ctx.accum_err;
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83121a.c b/drivers/gpu/drm/panel/panel-himax-hx83121a.c
index ebe643ba4184..bed79aa06f46 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83121a.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83121a.c
@@ -596,8 +596,8 @@ static int himax_probe(struct mipi_dsi_device *dsi)
ctx = devm_drm_panel_alloc(dev, struct himax, panel, &himax_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
- if (!ctx)
- return -ENOMEM;
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(himax_supplies),
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 2bbb1168a3ff..1e6a2392d7c6 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -118,12 +118,13 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Complete initialization. */
ret = drm_dev_register(&qdev->ddev, ent->driver_data);
if (ret)
- goto modeset_cleanup;
+ goto poll_fini;
drm_client_setup(&qdev->ddev, NULL);
return 0;
-modeset_cleanup:
+poll_fini:
+ drm_kms_helper_poll_fini(&qdev->ddev);
qxl_modeset_fini(qdev);
unload:
qxl_device_fini(qdev);
@@ -154,6 +155,7 @@ qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_kms_helper_poll_fini(dev);
drm_dev_unregister(dev);
drm_atomic_helper_shutdown(dev);
if (pci_is_vga(pdev) && pdev->revision < 5)
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 22321eb95b7d..703848fac189 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -2461,7 +2461,8 @@ static void ci_register_patching_mc_arb(struct radeon_device *rdev,
if (patch &&
((rdev->pdev->device == 0x67B0) ||
- (rdev->pdev->device == 0x67B1))) {
+ (rdev->pdev->device == 0x67B1)) &&
+ (rdev->pdev->revision == 0)) {
if ((memory_clock > 100000) && (memory_clock <= 125000)) {
tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
*dram_timimg2 &= ~0x00ff0000;
@@ -3304,7 +3305,8 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
if ((dpm_table->mclk_table.count >= 2) &&
- ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
+ ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1)) &&
+ (rdev->pdev->revision == 0)) {
pi->smc_state_table.MemoryLevel[1].MinVddc =
pi->smc_state_table.MemoryLevel[0].MinVddc;
pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
@@ -4493,7 +4495,8 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
if (patch &&
((rdev->pdev->device == 0x67B0) ||
- (rdev->pdev->device == 0x67B1))) {
+ (rdev->pdev->device == 0x67B1)) &&
+ (rdev->pdev->revision == 0)) {
for (i = 0; i < table->last; i++) {
if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index b7397827889c..360a88ca8f0c 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -741,6 +741,7 @@ static int sti_hda_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_hda *hda;
struct resource *res;
+ int ret;
DRM_INFO("%s\n", __func__);
@@ -779,7 +780,9 @@ static int sti_hda_probe(struct platform_device *pdev)
return PTR_ERR(hda->clk_hddac);
}
- drm_bridge_add(&hda->bridge);
+ ret = devm_drm_bridge_add(dev, &hda->bridge);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, hda);
@@ -788,10 +791,7 @@ static int sti_hda_probe(struct platform_device *pdev)
static void sti_hda_remove(struct platform_device *pdev)
{
- struct sti_hda *hda = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &sti_hda_ops);
- drm_bridge_remove(&hda->bridge);
}
static const struct of_device_id hda_of_match[] = {
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 222e4ae1abbd..5d8dc5efec77 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -761,25 +761,21 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
ret = pcim_enable_device(pdev);
if (ret)
- goto err_free_dev;
+ return ret;
pci_set_drvdata(pdev, dev);
ret = bochs_load(bochs);
if (ret)
- goto err_free_dev;
+ return ret;
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_free_dev;
+ return ret;
drm_client_setup(dev, NULL);
return ret;
-
-err_free_dev:
- drm_dev_put(dev);
- return ret;
}
static void bochs_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 26a3689e5fd9..278bbe7a11ad 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -206,6 +206,14 @@ error_free:
return NULL;
}
+static void __free_pages_gpu_account(struct page *p, unsigned int order,
+ bool reclaim)
+{
+ mod_lruvec_page_state(p, reclaim ? NR_GPU_RECLAIM : NR_GPU_ACTIVE,
+ -(1 << order));
+ __free_pages(p, order);
+}
+
/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
unsigned int order, struct page *p, bool reclaim)
@@ -223,9 +231,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
#endif
if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
- mod_lruvec_page_state(p, reclaim ? NR_GPU_RECLAIM : NR_GPU_ACTIVE,
- -(1 << order));
- __free_pages(p, order);
+ __free_pages_gpu_account(p, order, reclaim);
return;
}
@@ -606,7 +612,7 @@ static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
*/
ttm_pool_split_for_swap(restore->pool, p);
copy_highpage(restore->alloced_page + i, p);
- __free_pages(p, 0);
+ __free_pages_gpu_account(p, 0, false);
}
restore->restored_pages++;
@@ -1068,7 +1074,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
if (flags->purge) {
shrunken += num_pages;
page->private = 0;
- __free_pages(page, order);
+ __free_pages_gpu_account(page, order, false);
memset(tt->pages + i, 0,
num_pages * sizeof(*tt->pages));
}
@@ -1109,7 +1115,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
}
handle = shandle;
tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
- put_page(page);
+ __free_pages_gpu_account(page, 0, false);
shrunken++;
}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 29c72aa4b0d2..33494b86205d 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -37,9 +37,17 @@ static bool intel_hdcp_gsc_check_status(struct drm_device *drm)
struct xe_device *xe = to_xe_device(drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
- struct xe_gsc *gsc = &gt->uc.gsc;
+ struct xe_gsc *gsc;
+
+ if (!gt) {
+ drm_dbg_kms(&xe->drm,
+ "not checking GSC status for HDCP2.x: media GT not present or disabled\n");
+ return false;
+ }
+
+ gsc = &gt->uc.gsc;
- if (!gsc || !xe_uc_fw_is_available(&gsc->fw)) {
+ if (!xe_uc_fw_is_available(&gsc->fw)) {
drm_dbg_kms(&xe->drm,
"GSC Components not ready for HDCP2.x\n");
return false;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index 87a164efcc33..01fe03b9efe8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -385,10 +385,10 @@ static int pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid, void *buf
if (xe_gt_is_media_type(gt))
for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
- regs[n] = xe_mmio_read32(&gt->mmio, MED_VF_SW_FLAG(n));
+ regs[n] = xe_mmio_read32(&mmio, MED_VF_SW_FLAG(n));
else
for (n = 0; n < VF_SW_FLAG_COUNT; n++)
- regs[n] = xe_mmio_read32(&gt->mmio, VF_SW_FLAG(n));
+ regs[n] = xe_mmio_read32(&mmio, VF_SW_FLAG(n));
return 0;
}
@@ -407,10 +407,10 @@ static int pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
if (xe_gt_is_media_type(gt))
for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
- xe_mmio_write32(&gt->mmio, MED_VF_SW_FLAG(n), regs[n]);
+ xe_mmio_write32(&mmio, MED_VF_SW_FLAG(n), regs[n]);
else
for (n = 0; n < VF_SW_FLAG_COUNT; n++)
- xe_mmio_write32(&gt->mmio, VF_SW_FLAG(n), regs[n]);
+ xe_mmio_write32(&mmio, VF_SW_FLAG(n), regs[n]);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 81b5f01b1f65..2b835d48b565 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -512,12 +512,9 @@ static void guc_golden_lrc_init(struct xe_guc_ads *ads)
* that starts after the execlists LRC registers. This is
* required to allow the GuC to restore just the engine state
* when a watchdog reset occurs.
- * We calculate the engine state size by removing the size of
- * what comes before it in the context image (which is identical
- * on all engines).
*/
ads_blob_write(ads, ads.eng_state_size[guc_class],
- real_size - xe_lrc_skip_size(xe));
+ xe_lrc_engine_state_size(gt, class));
ads_blob_write(ads, ads.golden_context_lrca[guc_class],
addr_ggtt);
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index c725cde4508d..4af9f0d7c6f3 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -746,9 +746,16 @@ size_t xe_lrc_reg_size(struct xe_device *xe)
return 80 * sizeof(u32);
}
-size_t xe_lrc_skip_size(struct xe_device *xe)
+/**
+ * xe_lrc_engine_state_size() - Get size of the engine state within LRC
+ * @gt: the &xe_gt struct instance
+ * @class: Hardware engine class
+ *
+ * Returns: Size of the engine state
+ */
+size_t xe_lrc_engine_state_size(struct xe_gt *gt, enum xe_engine_class class)
{
- return LRC_PPHWSP_SIZE + xe_lrc_reg_size(xe);
+ return xe_gt_lrc_hang_replay_size(gt, class) - xe_lrc_reg_size(gt_to_xe(gt));
}
static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index e7c975f9e2d9..5440663183f6 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -130,7 +130,7 @@ u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc);
struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc);
size_t xe_lrc_reg_size(struct xe_device *xe);
-size_t xe_lrc_skip_size(struct xe_device *xe);
+size_t xe_lrc_engine_state_size(struct xe_gt *gt, enum xe_engine_class class);
void xe_lrc_dump_default(struct drm_printer *p,
struct xe_gt *gt,
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
index 6c4b16409cc9..150a241110fb 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
@@ -149,10 +149,11 @@ pf_migration_consume(struct xe_device *xe, unsigned int vfid)
for_each_gt(gt, xe, gt_id) {
data = xe_gt_sriov_pf_migration_save_consume(gt, vfid);
- if (data && PTR_ERR(data) != EAGAIN)
+ if (!data)
+ continue;
+ if (!IS_ERR(data) || PTR_ERR(data) != -EAGAIN)
return data;
- if (PTR_ERR(data) == -EAGAIN)
- more_data = true;
+ more_data = true;
}
if (!more_data)
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1342e764a548..834d8fabfba3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -11,6 +11,9 @@
#include "amd_iommu_types.h"
+extern int amd_iommu_evtlog_size;
+extern int amd_iommu_pprlog_size;
+
irqreturn_t amd_iommu_int_thread(int irq, void *data);
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index c685d3771436..f9f718087893 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
+#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/iommufd.h>
@@ -141,7 +142,6 @@
#define MMIO_STATUS_GALOG_INT_MASK BIT(10)
/* event logging constants */
-#define EVENT_ENTRY_SIZE 0x10
#define EVENT_TYPE_SHIFT 28
#define EVENT_TYPE_MASK 0xf
#define EVENT_TYPE_ILL_DEV 0x1
@@ -259,15 +259,20 @@
#define MMIO_CMD_BUFFER_TAIL(x) FIELD_GET(MMIO_CMD_TAIL_MASK, (x))
/* constants for event buffer handling */
-#define EVT_BUFFER_SIZE 8192 /* 512 entries */
-#define EVT_LEN_MASK (0x9ULL << 56)
+#define EVTLOG_ENTRY_SIZE 0x10
+#define EVTLOG_SIZE_SHIFT 56
+#define EVTLOG_SIZE_DEF SZ_8K /* 512 entries */
+#define EVTLOG_LEN_MASK_DEF (0x9ULL << EVTLOG_SIZE_SHIFT)
+#define EVTLOG_SIZE_MAX SZ_512K /* 32K entries */
+#define EVTLOG_LEN_MASK_MAX (0xFULL << EVTLOG_SIZE_SHIFT)
/* Constants for PPR Log handling */
-#define PPR_LOG_ENTRIES 512
-#define PPR_LOG_SIZE_SHIFT 56
-#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT)
-#define PPR_ENTRY_SIZE 16
-#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+#define PPRLOG_ENTRY_SIZE 0x10
+#define PPRLOG_SIZE_SHIFT 56
+#define PPRLOG_SIZE_DEF SZ_8K /* 512 entries */
+#define PPRLOG_LEN_MASK_DEF (0x9ULL << PPRLOG_SIZE_SHIFT)
+#define PPRLOG_SIZE_MAX SZ_512K /* 32K entries */
+#define PPRLOG_LEN_MASK_MAX (0xFULL << PPRLOG_SIZE_SHIFT)
/* PAGE_SERVICE_REQUEST PPR Log Buffer Entry flags */
#define PPR_FLAG_EXEC 0x002 /* Execute permission requested */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 56ad020df494..3bdb380d23e9 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -132,6 +132,9 @@ struct ivhd_entry {
u8 uid;
} __attribute__((packed));
+int amd_iommu_evtlog_size = EVTLOG_SIZE_DEF;
+int amd_iommu_pprlog_size = PPRLOG_SIZE_DEF;
+
/*
* An AMD IOMMU memory definition structure. It defines things like exclusion
* ranges for devices and regions that should be unity mapped.
@@ -865,35 +868,47 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
}
/* allocates the memory where the IOMMU will log its events to */
-static int __init alloc_event_buffer(struct amd_iommu *iommu)
+static int __init alloc_event_buffer(void)
{
- iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
- EVT_BUFFER_SIZE);
+ struct amd_iommu *iommu;
- return iommu->evt_buf ? 0 : -ENOMEM;
+ for_each_iommu(iommu) {
+ iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
+ amd_iommu_evtlog_size);
+ if (!iommu->evt_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
}
-static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+static void iommu_enable_event_buffer(void)
{
+ struct amd_iommu *iommu;
u64 entry;
- BUG_ON(iommu->evt_buf == NULL);
+ for_each_iommu(iommu) {
+ BUG_ON(iommu->evt_buf == NULL);
- if (!is_kdump_kernel()) {
- /*
- * Event buffer is re-used for kdump kernel and setting
- * of MMIO register is not required.
- */
- entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
- &entry, sizeof(entry));
- }
+ if (!is_kdump_kernel()) {
+ /*
+ * Event buffer is re-used for kdump kernel and setting
+ * of MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->evt_buf);
+ entry |= (amd_iommu_evtlog_size == EVTLOG_SIZE_DEF) ?
+ EVTLOG_LEN_MASK_DEF : EVTLOG_LEN_MASK_MAX;
- /* set head and tail to zero manually */
- writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
- iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ }
}
/*
@@ -984,15 +999,20 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
return 0;
}
-static int __init remap_event_buffer(struct amd_iommu *iommu)
+static int __init remap_event_buffer(void)
{
+ struct amd_iommu *iommu;
u64 paddr;
pr_info_once("Re-using event buffer from the previous kernel\n");
- paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
- iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+ for_each_iommu(iommu) {
+ paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->evt_buf = iommu_memremap(paddr, amd_iommu_evtlog_size);
+ if (!iommu->evt_buf)
+ return -ENOMEM;
+ }
- return iommu->evt_buf ? 0 : -ENOMEM;
+ return 0;
}
static int __init remap_command_buffer(struct amd_iommu *iommu)
@@ -1044,10 +1064,6 @@ static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
ret = remap_command_buffer(iommu);
if (ret)
return ret;
-
- ret = remap_event_buffer(iommu);
- if (ret)
- return ret;
} else {
ret = alloc_cwwb_sem(iommu);
if (ret)
@@ -1056,10 +1072,6 @@ static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
ret = alloc_command_buffer(iommu);
if (ret)
return ret;
-
- ret = alloc_event_buffer(iommu);
- if (ret)
- return ret;
}
return 0;
@@ -2893,7 +2905,6 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
- iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
@@ -2957,7 +2968,6 @@ static void early_enable_iommus(void)
iommu_disable_event_buffer(iommu);
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu);
- iommu_enable_event_buffer(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -3070,6 +3080,7 @@ static void amd_iommu_resume(void *data)
for_each_iommu(iommu)
early_enable_iommu(iommu);
+ iommu_enable_event_buffer();
amd_iommu_enable_interrupts();
}
@@ -3399,6 +3410,33 @@ disable_snp:
#endif
}
+static void amd_iommu_apply_erratum_snp(void)
+{
+#ifdef CONFIG_KVM_AMD_SEV
+ if (!amd_iommu_snp_en)
+ return;
+
+ /* Errata fix for Family 0x19 */
+ if (boot_cpu_data.x86 != 0x19)
+ return;
+
+ /* Set event log buffer size to max */
+ amd_iommu_evtlog_size = EVTLOG_SIZE_MAX;
+ pr_info("Applying erratum: Increase Event log size to 0x%x\n",
+ amd_iommu_evtlog_size);
+
+ /*
+ * Set PPR log buffer size to max.
+ * (Family 0x19, model < 0x10 doesn't support PPR when SNP is enabled).
+ */
+ if (boot_cpu_data.x86_model >= 0x10) {
+ amd_iommu_pprlog_size = PPRLOG_SIZE_MAX;
+ pr_info("Applying erratum: Increase PPR log size to 0x%x\n",
+ amd_iommu_pprlog_size);
+ }
+#endif
+}
+
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
@@ -3435,6 +3473,21 @@ static int __init state_next(void)
case IOMMU_ENABLED:
register_syscore(&amd_iommu_syscore);
iommu_snp_enable();
+
+ amd_iommu_apply_erratum_snp();
+
+ /* Allocate/enable event log buffer */
+ if (is_kdump_kernel())
+ ret = remap_event_buffer();
+ else
+ ret = alloc_event_buffer();
+
+ if (ret) {
+ init_state = IOMMU_INIT_ERROR;
+ break;
+ }
+ iommu_enable_event_buffer();
+
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
break;
@@ -4037,11 +4090,11 @@ int amd_iommu_snp_disable(void)
return 0;
for_each_iommu(iommu) {
- ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
+ ret = iommu_make_shared(iommu->evt_buf, amd_iommu_evtlog_size);
if (ret)
return ret;
- ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
+ ret = iommu_make_shared(iommu->ppr_log, amd_iommu_pprlog_size);
if (ret)
return ret;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 01171361f9bc..f78e23f03938 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1010,7 +1010,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
iommu_print_event(iommu, iommu->evt_buf + head);
/* Update head pointer of hardware ring-buffer */
- head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
+ head = (head + EVTLOG_ENTRY_SIZE) % amd_iommu_evtlog_size;
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}
@@ -2149,7 +2149,8 @@ static void set_dte_passthrough(struct iommu_dev_data *dev_data,
new->data[0] |= DTE_FLAG_TV | DTE_FLAG_IR | DTE_FLAG_IW;
new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domain->id) |
- (dev_data->ats_enabled) ? DTE_FLAG_IOTLB : 0;
+ (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
+
}
static void set_dte_entry(struct amd_iommu *iommu,
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index e6767c057d01..1f8d2823bea4 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -20,7 +20,7 @@
int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
{
iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
- PPR_LOG_SIZE);
+ amd_iommu_pprlog_size);
return iommu->ppr_log ? 0 : -ENOMEM;
}
@@ -33,7 +33,9 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
iommu_feature_enable(iommu, CONTROL_PPR_EN);
- entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+ entry = iommu_virt_to_phys(iommu->ppr_log);
+ entry |= (amd_iommu_pprlog_size == PPRLOG_SIZE_DEF) ?
+ PPRLOG_LEN_MASK_DEF : PPRLOG_LEN_MASK_MAX;
memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
&entry, sizeof(entry));
@@ -201,7 +203,7 @@ void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
raw[0] = raw[1] = 0UL;
/* Update head pointer of hardware ring-buffer */
- head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+ head = (head + PPRLOG_ENTRY_SIZE) % amd_iommu_pprlog_size;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
/* Handle PPR entry */
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index ae9da4f571f6..e9e605b5fa3a 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -137,7 +137,7 @@ static inline void iommu_pages_flush_incoherent(struct device *dma_dev,
void *virt, size_t offset,
size_t len)
{
- dma_sync_single_for_device(dma_dev, (uintptr_t)virt + offset, len,
+ dma_sync_single_for_device(dma_dev, virt_to_phys(virt) + offset, len,
DMA_TO_DEVICE);
}
void iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index b9423389c2ef..44d670904ad8 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -25,6 +25,9 @@
#include "mt7530.h"
+#define MT7530_STATS_POLL_INTERVAL (1 * HZ)
+#define MT7530_STATS_RATE_LIMIT (HZ / 10)
+
static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct mt753x_pcs, pcs);
@@ -906,10 +909,9 @@ static void mt7530_get_rmon_stats(struct dsa_switch *ds, int port,
*ranges = mt7530_rmon_ranges;
}
-static void mt7530_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *storage)
+static void mt7530_read_port_stats64(struct mt7530_priv *priv, int port,
+ struct rtnl_link_stats64 *storage)
{
- struct mt7530_priv *priv = ds->priv;
uint64_t data;
/* MIB counter doesn't provide a FramesTransmittedOK but instead
@@ -951,6 +953,54 @@ static void mt7530_get_stats64(struct dsa_switch *ds, int port,
&storage->rx_crc_errors);
}
+static void mt7530_stats_refresh(struct mt7530_priv *priv)
+{
+ struct rtnl_link_stats64 stats = {};
+ struct dsa_port *dp;
+ int port;
+
+ dsa_switch_for_each_user_port(dp, priv->ds) {
+ port = dp->index;
+
+ mt7530_read_port_stats64(priv, port, &stats);
+
+ spin_lock_bh(&priv->stats_lock);
+ priv->ports[port].stats = stats;
+ priv->stats_last = jiffies;
+ spin_unlock_bh(&priv->stats_lock);
+ }
+}
+
+static void mt7530_stats_poll(struct work_struct *work)
+{
+ struct mt7530_priv *priv = container_of(work, struct mt7530_priv,
+ stats_work.work);
+
+ mt7530_stats_refresh(priv);
+ schedule_delayed_work(&priv->stats_work,
+ MT7530_STATS_POLL_INTERVAL);
+}
+
+static void mt7530_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mt7530_priv *priv = ds->priv;
+ bool refresh;
+
+ if (priv->bus) {
+ spin_lock_bh(&priv->stats_lock);
+ *storage = priv->ports[port].stats;
+ refresh = time_after(jiffies, priv->stats_last +
+ MT7530_STATS_RATE_LIMIT);
+ spin_unlock_bh(&priv->stats_lock);
+ if (refresh)
+ mod_delayed_work(system_percpu_wq,
+ &priv->stats_work, 0);
+ } else {
+ mt7530_read_port_stats64(priv, port, storage);
+ }
+}
+
static void mt7530_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
@@ -3137,9 +3187,24 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq_domain)
mt7530_free_mdio_irq(priv);
+ if (!ret && priv->bus) {
+ mt7530_stats_refresh(priv);
+ schedule_delayed_work(&priv->stats_work,
+ MT7530_STATS_POLL_INTERVAL);
+ }
+
return ret;
}
+static void
+mt753x_teardown(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (priv->bus)
+ cancel_delayed_work_sync(&priv->stats_work);
+}
+
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -3257,6 +3322,7 @@ static int mt7988_setup(struct dsa_switch *ds)
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
+ .teardown = mt753x_teardown,
.preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
@@ -3395,6 +3461,9 @@ mt7530_probe_common(struct mt7530_priv *priv)
priv->ds->ops = &mt7530_switch_ops;
priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
+ spin_lock_init(&priv->stats_lock);
+ INIT_DELAYED_WORK(&priv->stats_work, mt7530_stats_poll);
+
dev_set_drvdata(dev, priv);
return 0;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 3e0090bed298..dd33b0df3419 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -796,6 +796,7 @@ struct mt7530_fdb {
* @pvid: The VLAN specified is to be considered a PVID at ingress. Any
* untagged frames will be assigned to the related VLAN.
* @sgmii_pcs: Pointer to PCS instance for SerDes ports
+ * @stats: Cached port statistics for MDIO-connected switches
*/
struct mt7530_port {
bool enable;
@@ -803,6 +804,7 @@ struct mt7530_port {
u32 pm;
u16 pvid;
struct phylink_pcs *sgmii_pcs;
+ struct rtnl_link_stats64 stats;
};
/* Port 5 mode definitions of the MT7530 switch */
@@ -875,6 +877,9 @@ struct mt753x_info {
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
* @active_cpu_ports: Holding the active CPU ports
* @mdiodev: The pointer to the MDIO device structure
+ * @stats_lock: Protects cached per-port stats from concurrent access
+ * @stats_work: Delayed work for polling MIB counters on MDIO switches
+ * @stats_last: Jiffies timestamp of last MIB counter poll
*/
struct mt7530_priv {
struct device *dev;
@@ -900,6 +905,9 @@ struct mt7530_priv {
int (*create_sgmii)(struct mt7530_priv *priv);
u8 active_cpu_ports;
struct mdio_device *mdiodev;
+ spinlock_t stats_lock; /* protects cached stats counters */
+ struct delayed_work stats_work;
+ unsigned long stats_last;
};
struct mt7530_hw_vlan_entry {
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index f8b3d53bccad..d0c0c0ec8a80 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -2120,14 +2120,12 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
error_unmap:
- while (!list_empty(&tx_list)) {
- e = list_first_entry(&tx_list, struct airoha_queue_entry,
- list);
+ list_for_each_entry(e, &tx_list, list) {
dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
DMA_TO_DEVICE);
e->dma_addr = 0;
- list_move_tail(&e->list, &q->tx_list);
}
+ list_splice(&tx_list, &q->tx_list);
spin_unlock_bh(&q->lock);
error:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 60b7e53206d1..3d3b09010d48 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -135,11 +135,11 @@
*/
#define XGBE_TSTAMP_SSINC 20
#define XGBE_TSTAMP_SNSINC 0
-#define XGBE_PTP_ACT_CLK_FREQ 500000000
+#define XGBE_PTP_ACT_CLK_FREQ (NSEC_PER_SEC / XGBE_TSTAMP_SSINC)
#define XGBE_V2_TSTAMP_SSINC 0xA
#define XGBE_V2_TSTAMP_SNSINC 0
-#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
+#define XGBE_V2_PTP_ACT_CLK_FREQ (NSEC_PER_SEC / XGBE_V2_TSTAMP_SSINC)
/* Define maximum supported values */
#define XGBE_MAX_PPS_OUT 4
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8c55874f44ca..008c34cff7b4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3825,7 +3825,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!bp->max_tpa_v2)
return 0;
- bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+ bp->max_tpa = min_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+ /* Older P5 FW sets max_tpa_v2 low by mistake except NPAR */
+ if (bp->max_tpa <= 32 && BNXT_CHIP_P5(bp) && !BNXT_NPAR(bp))
+ bp->max_tpa = MAX_TPA_P5;
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -17360,9 +17363,14 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
netdev_info(bp->dev, "PCI Slot Reset\n");
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
- test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
- msleep(900);
+ if (test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) {
+ /* After DPC, the chip should return CRS when the vendor ID
+ * config register is read until it is ready. On all chips,
+ * this is not happening reliably so add a 5-second delay as a
+ * workaround.
+ */
+ msleep(5000);
+ }
netdev_lock(netdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 53f336db4fcc..5d41dc1bc782 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -419,31 +419,13 @@ void bnxt_ptp_reapply_pps(struct bnxt *bp)
}
}
-static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
- u64 *cycles_delta)
-{
- u64 cycles_now;
- u64 nsec_now, nsec_delta;
- int rc;
-
- rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
- if (rc)
- return rc;
-
- nsec_now = bnxt_timecounter_cyc2time(ptp, cycles_now);
-
- nsec_delta = target_ns - nsec_now;
- *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
- return 0;
-}
-
static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
struct ptp_clock_request *rq)
{
struct hwrm_func_ptp_cfg_input *req;
struct bnxt *bp = ptp->bp;
struct timespec64 ts;
- u64 target_ns, delta;
+ u64 target_ns;
u16 enables;
int rc;
@@ -451,10 +433,6 @@ static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
ts.tv_nsec = rq->perout.start.nsec;
target_ns = timespec64_to_ns(&ts);
- rc = bnxt_get_target_cycles(ptp, target_ns, &delta);
- if (rc)
- return rc;
-
rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
if (rc)
return rc;
@@ -468,7 +446,10 @@ static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
req->ptp_freq_adj_dll_phase = 0;
req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
req->ptp_freq_adj_ext_up = 0;
- req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
+ req->ptp_freq_adj_ext_phase_lower =
+ cpu_to_le32(lower_32_bits(target_ns));
+ req->ptp_freq_adj_ext_phase_upper =
+ cpu_to_le32(upper_32_bits(target_ns));
return hwrm_req_send(bp, req);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 052bf69cfa4c..5c751933da6a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -175,8 +175,14 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
+ if (rc) {
+ netdev_err(dev, "Failed to configure dual VNIC mode\n");
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ goto exit;
+ }
+ }
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 4824232f4890..065cbbf52686 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1491,6 +1491,11 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
if (!gpage) {
dev_err(geth->dev, "could not find mapping\n");
+ if (skb) {
+ napi_free_frags(&port->napi);
+ port->stats.rx_dropped++;
+ skb = NULL;
+ }
continue;
}
page = gpage->page;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index e663bb5e614e..e691144e8756 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -330,6 +330,7 @@ struct enetc_si {
struct workqueue_struct *workqueue;
struct work_struct rx_mode_task;
struct dentry *debugfs_root;
+ struct enetc_msg_swbd msg; /* Only valid for VSI */
};
#define ENETC_SI_ALIGN 32
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 6c4b374bcb0e..df8e95cc47d0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -17,11 +17,36 @@ static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
enetc_wr(hw, ENETC_VSIMSGSNDAR0, val);
}
+static void enetc_msg_dma_free(struct device *dev, struct enetc_msg_swbd *msg)
+{
+ if (msg->vaddr) {
+ dma_free_coherent(dev, msg->size, msg->vaddr, msg->dma);
+ msg->vaddr = NULL;
+ }
+}
+
static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
{
+ struct device *dev = &si->pdev->dev;
int timeout = 100;
u32 vsimsgsr;
+ /* The VSI mailbox may be busy if last message was not yet processed
+ * by PSI. So need to check the mailbox status before sending.
+ */
+ vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR);
+ if (vsimsgsr & ENETC_VSIMSGSR_MB) {
+ /* It is safe to free the DMA buffer here, the caller does
+ * not access the DMA buffer if enetc_msg_vsi_send() fails.
+ */
+ enetc_msg_dma_free(dev, msg);
+ dev_err(dev, "VSI mailbox is busy\n");
+ return -EIO;
+ }
+
+ /* Free the DMA buffer of the last message */
+ enetc_msg_dma_free(dev, &si->msg);
+ si->msg = *msg;
enetc_msg_vsi_write_msg(&si->hw, msg);
do {
@@ -32,12 +57,15 @@ static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
usleep_range(1000, 2000);
} while (--timeout);
- if (!timeout)
+ if (!timeout) {
+ dev_err(dev, "VSI mailbox timeout\n");
+
return -ETIMEDOUT;
+ }
/* check for message delivery error */
if (vsimsgsr & ENETC_VSIMSGSR_MS) {
- dev_err(&si->pdev->dev, "VSI command execute error: %d\n",
+ dev_err(dev, "VSI command execute error: %d\n",
ENETC_SIMSGSR_GET_MC(vsimsgsr));
return -EIO;
}
@@ -50,7 +78,6 @@ static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
{
struct enetc_msg_cmd_set_primary_mac *cmd;
struct enetc_msg_swbd msg;
- int err;
msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64);
msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
@@ -67,11 +94,7 @@ static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
memcpy(&cmd->mac, saddr, sizeof(struct sockaddr));
/* send the command and wait */
- err = enetc_msg_vsi_send(priv->si, &msg);
-
- dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
-
- return err;
+ return enetc_msg_vsi_send(priv->si, &msg);
}
static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
@@ -259,6 +282,7 @@ static void enetc_vf_remove(struct pci_dev *pdev)
{
struct enetc_si *si = pci_get_drvdata(pdev);
struct enetc_ndev_priv *priv;
+ struct enetc_msg_swbd msg;
priv = netdev_priv(si->ndev);
unregister_netdev(si->ndev);
@@ -270,7 +294,9 @@ static void enetc_vf_remove(struct pci_dev *pdev)
free_netdev(si->ndev);
+ msg = si->msg;
enetc_pci_remove(pdev);
+ enetc_msg_dma_free(&pdev->dev, &msg);
}
static const struct pci_device_id enetc_vf_id_table[] = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
index 3debf2fae1a4..6f13296303cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
@@ -249,34 +249,21 @@ DEFINE_SHOW_ATTRIBUTE(npc_defrag);
int npc_cn20k_debugfs_init(struct rvu *rvu)
{
struct npc_priv_t *npc_priv = npc_priv_get();
- struct dentry *npc_dentry;
- npc_dentry = debugfs_create_file("mcam_layout", 0444, rvu->rvu_dbg.npc,
- npc_priv, &npc_mcam_layout_fops);
+ debugfs_create_file("mcam_layout", 0444, rvu->rvu_dbg.npc,
+ npc_priv, &npc_mcam_layout_fops);
- if (!npc_dentry)
- return -EFAULT;
+ debugfs_create_file("mcam_default", 0444, rvu->rvu_dbg.npc,
+ rvu, &npc_mcam_default_fops);
- npc_dentry = debugfs_create_file("mcam_default", 0444, rvu->rvu_dbg.npc,
- rvu, &npc_mcam_default_fops);
+ debugfs_create_file("vidx2idx", 0444, rvu->rvu_dbg.npc,
+ npc_priv, &npc_vidx2idx_map_fops);
- if (!npc_dentry)
- return -EFAULT;
+ debugfs_create_file("idx2vidx", 0444, rvu->rvu_dbg.npc,
+ npc_priv, &npc_idx2vidx_map_fops);
- npc_dentry = debugfs_create_file("vidx2idx", 0444, rvu->rvu_dbg.npc,
- npc_priv, &npc_vidx2idx_map_fops);
- if (!npc_dentry)
- return -EFAULT;
-
- npc_dentry = debugfs_create_file("idx2vidx", 0444, rvu->rvu_dbg.npc,
- npc_priv, &npc_idx2vidx_map_fops);
- if (!npc_dentry)
- return -EFAULT;
-
- npc_dentry = debugfs_create_file("defrag", 0444, rvu->rvu_dbg.npc,
- npc_priv, &npc_defrag_fops);
- if (!npc_dentry)
- return -EFAULT;
+ debugfs_create_file("defrag", 0444, rvu->rvu_dbg.npc,
+ npc_priv, &npc_defrag_fops);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
index 7291fdb89b03..6b3f453fd500 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
@@ -798,7 +798,7 @@ program_mkex_extr:
iounmap(mkex_prfl_addr);
}
-void
+int
npc_cn20k_enable_mcam_entry(struct rvu *rvu, int blkaddr,
int index, bool enable)
{
@@ -808,7 +808,12 @@ npc_cn20k_enable_mcam_entry(struct rvu *rvu, int blkaddr,
u64 cfg, hw_prio;
u8 kw_type;
- npc_mcam_idx_2_key_type(rvu, index, &kw_type);
+ if (index < 0 || index >= mcam->total_entries)
+ return -EINVAL;
+
+ if (npc_mcam_idx_2_key_type(rvu, index, &kw_type))
+ return -EINVAL;
+
if (kw_type == NPC_MCAM_KEY_X2) {
cfg = rvu_read64(rvu, blkaddr,
NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx,
@@ -819,7 +824,7 @@ npc_cn20k_enable_mcam_entry(struct rvu *rvu, int blkaddr,
rvu_write64(rvu, blkaddr,
NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
cfg);
- return;
+ return 0;
}
/* For NPC_CN20K_MCAM_KEY_X4 keys, both the banks
@@ -836,10 +841,12 @@ npc_cn20k_enable_mcam_entry(struct rvu *rvu, int blkaddr,
NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
cfg);
}
+
+ return 0;
}
-void
-npc_cn20k_clear_mcam_entry(struct rvu *rvu, int blkaddr, int bank, int index)
+static void
+npc_clear_x2_entry(struct rvu *rvu, int blkaddr, int bank, int index)
{
rvu_write64(rvu, blkaddr,
NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(index, bank, 1),
@@ -873,6 +880,33 @@ npc_cn20k_clear_mcam_entry(struct rvu *rvu, int blkaddr, int bank, int index)
NPC_AF_CN20K_MCAMEX_BANKX_STAT_EXT(index, bank), 0);
}
+int
+npc_cn20k_clear_mcam_entry(struct rvu *rvu, int blkaddr, int mcam_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int bank = npc_get_bank(mcam, mcam_idx);
+ u8 kw_type;
+ int index;
+
+ if (npc_mcam_idx_2_key_type(rvu, mcam_idx, &kw_type))
+ return -EINVAL;
+
+ index = mcam_idx & (mcam->banksize - 1);
+
+ if (kw_type == NPC_MCAM_KEY_X2) {
+ npc_clear_x2_entry(rvu, blkaddr, bank, index);
+ return 0;
+ }
+
+ /* For NPC_MCAM_KEY_X4 keys, both the banks
+ * need to be programmed with the same value.
+ */
+ for (bank = 0; bank < mcam->banks_per_entry; bank++)
+ npc_clear_x2_entry(rvu, blkaddr, bank, index);
+
+ return 0;
+}
+
static void npc_cn20k_get_keyword(struct cn20k_mcam_entry *entry, int idx,
u64 *cam0, u64 *cam1)
{
@@ -1014,48 +1048,27 @@ static void npc_cn20k_config_kw_x4(struct rvu *rvu, struct npc_mcam *mcam,
kw, req_kw_type);
}
-static void
-npc_cn20k_set_mcam_bank_cfg(struct rvu *rvu, int blkaddr, int mcam_idx,
- int bank, u8 kw_type, bool enable, u8 hw_prio)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 bank_cfg;
-
- bank_cfg = (u64)hw_prio << 24;
- if (enable)
- bank_cfg |= 0x1;
-
- if (kw_type == NPC_MCAM_KEY_X2) {
- rvu_write64(rvu, blkaddr,
- NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
- bank_cfg);
- return;
- }
-
- /* For NPC_MCAM_KEY_X4 keys, both the banks
- * need to be programmed with the same value.
- */
- for (bank = 0; bank < mcam->banks_per_entry; bank++) {
- rvu_write64(rvu, blkaddr,
- NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
- bank_cfg);
- }
-}
-
-void npc_cn20k_config_mcam_entry(struct rvu *rvu, int blkaddr, int index,
- u8 intf, struct cn20k_mcam_entry *entry,
- bool enable, u8 hw_prio, u8 req_kw_type)
+int npc_cn20k_config_mcam_entry(struct rvu *rvu, int blkaddr, int index,
+ u8 intf, struct cn20k_mcam_entry *entry,
+ bool enable, u8 hw_prio, u8 req_kw_type)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int mcam_idx = index % mcam->banksize;
int bank = index / mcam->banksize;
+ u64 bank_cfg = (u64)hw_prio << 24;
int kw = 0;
u8 kw_type;
+ if (index < 0 || index >= mcam->total_entries)
+ return -EINVAL;
+
+ if (npc_mcam_idx_2_key_type(rvu, index, &kw_type))
+ return -EINVAL;
+
/* Disable before mcam entry update */
- npc_cn20k_enable_mcam_entry(rvu, blkaddr, index, false);
+ if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, index, false))
+ return -EINVAL;
- npc_mcam_idx_2_key_type(rvu, index, &kw_type);
/* CAM1 takes the comparison value and
* CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
* CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
@@ -1064,7 +1077,7 @@ void npc_cn20k_config_mcam_entry(struct rvu *rvu, int blkaddr, int index,
*/
if (kw_type == NPC_MCAM_KEY_X2) {
/* Clear mcam entry to avoid writes being suppressed by NPC */
- npc_cn20k_clear_mcam_entry(rvu, blkaddr, bank, mcam_idx);
+ npc_clear_x2_entry(rvu, blkaddr, bank, mcam_idx);
npc_cn20k_config_kw_x2(rvu, mcam, blkaddr,
mcam_idx, intf, entry,
bank, kw_type, kw, req_kw_type);
@@ -1085,44 +1098,55 @@ void npc_cn20k_config_mcam_entry(struct rvu *rvu, int blkaddr, int index,
NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
bank, 1),
entry->vtag_action);
- goto set_cfg;
- }
-
- /* Clear mcam entry to avoid writes being suppressed by NPC */
- npc_cn20k_clear_mcam_entry(rvu, blkaddr, 0, mcam_idx);
- npc_cn20k_clear_mcam_entry(rvu, blkaddr, 1, mcam_idx);
- npc_cn20k_config_kw_x4(rvu, mcam, blkaddr,
- mcam_idx, intf, entry,
- kw_type, req_kw_type);
- for (bank = 0; bank < mcam->banks_per_entry; bank++) {
- /* Set 'action' */
+ /* Set HW priority */
rvu_write64(rvu, blkaddr,
- NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
- bank, 0),
- entry->action);
+ NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
+ bank_cfg);
- /* Set TAG 'action' */
- rvu_write64(rvu, blkaddr,
- NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
- bank, 1),
- entry->vtag_action);
+ } else {
+ /* Clear mcam entry to avoid writes being suppressed by NPC */
+ npc_clear_x2_entry(rvu, blkaddr, 0, mcam_idx);
+ npc_clear_x2_entry(rvu, blkaddr, 1, mcam_idx);
- /* Set 'action2' for inline receive */
- rvu_write64(rvu, blkaddr,
- NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
- bank, 2),
- entry->action2);
+ npc_cn20k_config_kw_x4(rvu, mcam, blkaddr,
+ mcam_idx, intf, entry,
+ kw_type, req_kw_type);
+ for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+ /* Set 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
+ bank, 0),
+ entry->action);
+
+ /* Set TAG 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
+ bank, 1),
+ entry->vtag_action);
+
+ /* Set 'action2' for inline receive */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
+ bank, 2),
+ entry->action2);
+
+ /* Set HW priority */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
+ bank_cfg);
+ }
}
-set_cfg:
/* TODO: */
/* PF installing VF rule */
- npc_cn20k_set_mcam_bank_cfg(rvu, blkaddr, mcam_idx, bank,
- kw_type, enable, hw_prio);
+ if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, index, enable))
+ return -EINVAL;
+
+ return 0;
}
-void npc_cn20k_copy_mcam_entry(struct rvu *rvu, int blkaddr, u16 src, u16 dest)
+int npc_cn20k_copy_mcam_entry(struct rvu *rvu, int blkaddr, u16 src, u16 dest)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 cfg, sreg, dreg, soff, doff;
@@ -1130,12 +1154,20 @@ void npc_cn20k_copy_mcam_entry(struct rvu *rvu, int blkaddr, u16 src, u16 dest)
int bank, i, sb, db;
int dbank, sbank;
+ if (src >= mcam->total_entries || dest >= mcam->total_entries)
+ return -EINVAL;
+
dbank = npc_get_bank(mcam, dest);
sbank = npc_get_bank(mcam, src);
- npc_mcam_idx_2_key_type(rvu, src, &src_kwtype);
- npc_mcam_idx_2_key_type(rvu, dest, &dest_kwtype);
+
+ if (npc_mcam_idx_2_key_type(rvu, src, &src_kwtype))
+ return -EINVAL;
+
+ if (npc_mcam_idx_2_key_type(rvu, dest, &dest_kwtype))
+ return -EINVAL;
+
if (src_kwtype != dest_kwtype)
- return;
+ return -EINVAL;
src &= (mcam->banksize - 1);
dest &= (mcam->banksize - 1);
@@ -1170,6 +1202,8 @@ void npc_cn20k_copy_mcam_entry(struct rvu *rvu, int blkaddr, u16 src, u16 dest)
if (src_kwtype == NPC_MCAM_KEY_X2)
break;
}
+
+ return 0;
}
static void npc_cn20k_fill_entryword(struct cn20k_mcam_entry *entry, int idx,
@@ -1179,21 +1213,37 @@ static void npc_cn20k_fill_entryword(struct cn20k_mcam_entry *entry, int idx,
entry->kw_mask[idx] = cam1 ^ cam0;
}
-void npc_cn20k_read_mcam_entry(struct rvu *rvu, int blkaddr, u16 index,
- struct cn20k_mcam_entry *entry,
- u8 *intf, u8 *ena, u8 *hw_prio)
+int npc_cn20k_read_mcam_entry(struct rvu *rvu, int blkaddr, u16 index,
+ struct cn20k_mcam_entry *entry,
+ u8 *intf, u8 *ena, u8 *hw_prio)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 cam0, cam1, bank_cfg, cfg;
int kw = 0, bank;
u8 kw_type;
- npc_mcam_idx_2_key_type(rvu, index, &kw_type);
+ if (index >= mcam->total_entries)
+ return -EINVAL;
+
+ if (npc_mcam_idx_2_key_type(rvu, index, &kw_type))
+ return -EINVAL;
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, bank, 0));
+ entry->action = cfg;
+
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, bank, 1));
+ entry->vtag_action = cfg;
+
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, bank, 2));
+ entry->action2 = cfg;
+
+ cfg = rvu_read64(rvu, blkaddr,
NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(index,
bank, 1)) & 3;
*intf = cfg;
@@ -1242,7 +1292,7 @@ void npc_cn20k_read_mcam_entry(struct rvu *rvu, int blkaddr, u16 index,
bank,
0));
npc_cn20k_fill_entryword(entry, kw + 3, cam0, cam1);
- goto read_action;
+ return 0;
}
for (bank = 0; bank < mcam->banks_per_entry; bank++, kw = kw + 4) {
@@ -1287,17 +1337,7 @@ void npc_cn20k_read_mcam_entry(struct rvu *rvu, int blkaddr, u16 index,
npc_cn20k_fill_entryword(entry, kw + 3, cam0, cam1);
}
-read_action:
- /* 'action' is set to same value for both bank '0' and '1'.
- * Hence, reading bank '0' should be enough.
- */
- cfg = rvu_read64(rvu, blkaddr,
- NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, 0, 0));
- entry->action = cfg;
-
- cfg = rvu_read64(rvu, blkaddr,
- NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, 0, 1));
- entry->vtag_action = cfg;
+ return 0;
}
int rvu_mbox_handler_npc_cn20k_mcam_write_entry(struct rvu *rvu,
@@ -1335,11 +1375,10 @@ int rvu_mbox_handler_npc_cn20k_mcam_write_entry(struct rvu *rvu,
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
- npc_cn20k_config_mcam_entry(rvu, blkaddr, req->entry, nix_intf,
- &req->entry_data, req->enable_entry,
- req->hw_prio, req->req_kw_type);
+ rc = npc_cn20k_config_mcam_entry(rvu, blkaddr, req->entry, nix_intf,
+ &req->entry_data, req->enable_entry,
+ req->hw_prio, req->req_kw_type);
- rc = 0;
exit:
mutex_unlock(&mcam->lock);
return rc;
@@ -1361,11 +1400,13 @@ int rvu_mbox_handler_npc_cn20k_mcam_read_entry(struct rvu *rvu,
mutex_lock(&mcam->lock);
rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
- if (!rc)
- npc_cn20k_read_mcam_entry(rvu, blkaddr, req->entry,
- &rsp->entry_data, &rsp->intf,
- &rsp->enable, &rsp->hw_prio);
+ if (rc)
+ goto fail;
+ rc = npc_cn20k_read_mcam_entry(rvu, blkaddr, req->entry,
+ &rsp->entry_data, &rsp->intf,
+ &rsp->enable, &rsp->hw_prio);
+fail:
mutex_unlock(&mcam->lock);
return rc;
}
@@ -1375,11 +1416,13 @@ int rvu_mbox_handler_npc_cn20k_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npc_mcam_free_entry_req free_req = { 0 };
struct npc_mcam_alloc_entry_req entry_req;
struct npc_mcam_alloc_entry_rsp entry_rsp;
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 entry = NPC_MCAM_ENTRY_INVALID;
- int blkaddr, rc;
+ struct msg_rsp free_rsp;
+ int blkaddr, rc, err;
u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1415,12 +1458,23 @@ int rvu_mbox_handler_npc_cn20k_mcam_alloc_and_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- npc_cn20k_config_mcam_entry(rvu, blkaddr, entry, nix_intf,
- &req->entry_data, req->enable_entry,
- req->hw_prio, req->req_kw_type);
+ rc = npc_cn20k_config_mcam_entry(rvu, blkaddr, entry, nix_intf,
+ &req->entry_data, req->enable_entry,
+ req->hw_prio, req->req_kw_type);
mutex_unlock(&mcam->lock);
+ if (rc) {
+ free_req.hdr.pcifunc = req->hdr.pcifunc;
+ free_req.entry = entry_rsp.entry;
+ err = rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &free_rsp);
+ if (err)
+ dev_err(rvu->dev,
+ "%s: Error to free mcam idx %u\n",
+ __func__, entry_rsp.entry);
+ return rc;
+ }
+
rsp->entry = entry_rsp.entry;
return 0;
}
@@ -1480,9 +1534,9 @@ int rvu_mbox_handler_npc_cn20k_read_base_steer_rule(struct rvu *rvu,
read_entry:
/* Read the mcam entry */
- npc_cn20k_read_mcam_entry(rvu, blkaddr, index,
- &rsp->entry, &intf,
- &enable, &hw_prio);
+ rc = npc_cn20k_read_mcam_entry(rvu, blkaddr, index,
+ &rsp->entry, &intf,
+ &enable, &hw_prio);
mutex_unlock(&mcam->lock);
out:
return rc;
@@ -2305,6 +2359,7 @@ err2:
__npc_subbank_mark_free(rvu, sb);
err1:
kfree(save);
+ *alloc_cnt = 0;
return rc;
}
@@ -3482,7 +3537,7 @@ static int npc_defrag_alloc_free_slots(struct rvu *rvu,
{
int alloc_cnt1, alloc_cnt2;
struct npc_subbank *sb;
- int rc, sb_off, i;
+ int rc, sb_off, i, err;
bool deleted;
sb = &npc_priv.sb[f->idx];
@@ -3496,6 +3551,7 @@ static int npc_defrag_alloc_free_slots(struct rvu *rvu,
NPC_MCAM_LOWER_PRIO,
false, cnt, save, cnt, true,
&alloc_cnt1);
+
if (alloc_cnt1 < cnt) {
rc = __npc_subbank_alloc(rvu, sb,
NPC_MCAM_KEY_X2, sb->b1b,
@@ -3511,15 +3567,17 @@ static int npc_defrag_alloc_free_slots(struct rvu *rvu,
dev_err(rvu->dev,
"%s: Failed to alloc cnt=%u alloc_cnt1=%u alloc_cnt2=%u\n",
__func__, cnt, alloc_cnt1, alloc_cnt2);
+ rc = -ENOSPC;
goto fail_free_alloc;
}
+
return 0;
fail_free_alloc:
for (i = 0; i < alloc_cnt1 + alloc_cnt2; i++) {
- rc = npc_mcam_idx_2_subbank_idx(rvu, save[i],
- &sb, &sb_off);
- if (rc) {
+ err = npc_mcam_idx_2_subbank_idx(rvu, save[i],
+ &sb, &sb_off);
+ if (err) {
dev_err(rvu->dev,
"%s: Error to find subbank for mcam idx=%u\n",
__func__, save[i]);
@@ -3565,9 +3623,10 @@ int npc_defrag_move_vdx_to_free(struct rvu *rvu,
struct npc_defrag_node *v,
int cnt, u16 *save)
{
+ u16 new_midx, old_midx, vidx, target_pf;
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
int i, vidx_cnt, rc, sb_off;
- u16 new_midx, old_midx, vidx;
struct npc_subbank *sb;
bool deleted;
u16 pcifunc;
@@ -3607,9 +3666,30 @@ int npc_defrag_move_vdx_to_free(struct rvu *rvu,
NPC_AF_CN20K_MCAMEX_BANKX_STAT_EXT(midx,
bank));
- npc_cn20k_enable_mcam_entry(rvu, blkaddr, old_midx, false);
- npc_cn20k_copy_mcam_entry(rvu, blkaddr, old_midx, new_midx);
- npc_cn20k_enable_mcam_entry(rvu, blkaddr, new_midx, true);
+ /* If bug happened during copy/enable mcam, then there is a bug in allocation
+ * algorithm itself. There is no point in rewinding and returning, as it
+ * will face further issue. Return error after printing error
+ */
+ if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, old_midx, false)) {
+ dev_err(rvu->dev,
+ "%s: Error happened while disabling old_mid=%u\n",
+ __func__, old_midx);
+ return -EFAULT;
+ }
+
+ if (npc_cn20k_copy_mcam_entry(rvu, blkaddr, old_midx, new_midx)) {
+ dev_err(rvu->dev,
+ "%s: Error happened while copying old_midx=%u new_midx=%u\n",
+ __func__, old_midx, new_midx);
+ return -EFAULT;
+ }
+
+ if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, new_midx, true)) {
+ dev_err(rvu->dev,
+ "%s: Error happened while enabling new_mid=%u\n",
+ __func__, new_midx);
+ return -EFAULT;
+ }
midx = new_midx % mcam->banksize;
bank = new_midx / mcam->banksize;
@@ -3665,8 +3745,21 @@ int npc_defrag_move_vdx_to_free(struct rvu *rvu,
mcam->entry2pfvf_map[new_midx] = pcifunc;
/* Counter is not preserved */
mcam->entry2cntr_map[new_midx] = new_midx;
+ target_pf = mcam->entry2target_pffunc[old_midx];
+ mcam->entry2target_pffunc[new_midx] = target_pf;
+ mcam->entry2target_pffunc[old_midx] = NPC_MCAM_INVALID_MAP;
+
npc_mcam_set_bit(mcam, new_midx);
+ /* Note: list order is not functionally required for mcam_rules */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->entry != old_midx)
+ continue;
+
+ rule->entry = new_midx;
+ break;
+ }
+
/* Mark as invalid */
v->vidx[vidx_cnt - i - 1] = -1;
save[cnt - i - 1] = -1;
@@ -3935,6 +4028,13 @@ int npc_cn20k_dft_rules_idx_get(struct rvu *rvu, u16 pcifunc, u16 *bcast,
void *val;
int i, j;
+ for (i = 0; i < ARRAY_SIZE(ptr); i++) {
+ if (!ptr[i])
+ continue;
+
+ *ptr[i] = USHRT_MAX;
+ }
+
if (!npc_priv.init_done)
return 0;
@@ -3950,7 +4050,6 @@ int npc_cn20k_dft_rules_idx_get(struct rvu *rvu, u16 pcifunc, u16 *bcast,
npc_dft_rule_name[NPC_DFT_RULE_PROMISC_ID],
pcifunc);
- *ptr[0] = USHRT_MAX;
return -ESRCH;
}
@@ -3970,7 +4069,6 @@ int npc_cn20k_dft_rules_idx_get(struct rvu *rvu, u16 pcifunc, u16 *bcast,
npc_dft_rule_name[NPC_DFT_RULE_UCAST_ID],
pcifunc);
- *ptr[3] = USHRT_MAX;
return -ESRCH;
}
@@ -3990,7 +4088,6 @@ int npc_cn20k_dft_rules_idx_get(struct rvu *rvu, u16 pcifunc, u16 *bcast,
__func__,
npc_dft_rule_name[i], pcifunc);
- *ptr[j] = USHRT_MAX;
continue;
}
@@ -4085,7 +4182,7 @@ int rvu_mbox_handler_npc_get_dft_rl_idxs(struct rvu *rvu, struct msg_req *req,
return 0;
}
-static bool npc_is_cgx_or_lbk(struct rvu *rvu, u16 pcifunc)
+bool npc_is_cgx_or_lbk(struct rvu *rvu, u16 pcifunc)
{
return is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)) ||
is_lbk_vf(rvu, pcifunc);
@@ -4093,11 +4190,11 @@ static bool npc_is_cgx_or_lbk(struct rvu *rvu, u16 pcifunc)
void npc_cn20k_dft_rules_free(struct rvu *rvu, u16 pcifunc)
{
- struct npc_mcam_free_entry_req free_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 ptr[4] = {[0 ... 3] = USHRT_MAX};
+ struct rvu_npc_mcam_rule *rule, *tmp;
unsigned long index;
- struct msg_rsp rsp;
- u16 ptr[4];
- int rc, i;
+ int blkaddr, rc, i;
void *map;
if (!npc_priv.init_done)
@@ -4155,14 +4252,43 @@ void npc_cn20k_dft_rules_free(struct rvu *rvu, u16 pcifunc)
}
free_rules:
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+ for (int i = 0; i < 4; i++) {
+ if (ptr[i] == USHRT_MAX)
+ continue;
- free_req.hdr.pcifunc = pcifunc;
- free_req.all = 1;
- rc = rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
- if (rc)
- dev_err(rvu->dev,
- "%s: Error deleting default entries (pcifunc=%#x\n",
- __func__, pcifunc);
+ mutex_lock(&mcam->lock);
+ npc_mcam_clear_bit(mcam, ptr[i]);
+ mcam->entry2pfvf_map[ptr[i]] = NPC_MCAM_INVALID_MAP;
+ npc_cn20k_enable_mcam_entry(rvu, blkaddr, ptr[i], false);
+ mcam->entry2target_pffunc[ptr[i]] = 0x0;
+ mutex_unlock(&mcam->lock);
+
+ rc = npc_cn20k_idx_free(rvu, &ptr[i], 1);
+ if (rc) {
+ /* Non-recoverable error: WARN and return, but keep the system
+ * alive to enable debugging.
+ */
+ WARN(1, "%s Error deleting default entries (pcifunc=%#x) mcam_idx=%u\n",
+ __func__, pcifunc, ptr[i]);
+ return;
+ }
+ }
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ for (int i = 0; i < 4; i++) {
+ if (ptr[i] != rule->entry)
+ continue;
+
+ list_del(&rule->list);
+ kfree(rule);
+ break;
+ }
+ }
+ mutex_unlock(&mcam->lock);
}
int npc_cn20k_dft_rules_alloc(struct rvu *rvu, u16 pcifunc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h
index 815d0b257a7e..3d5eb952cc07 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.h
@@ -320,21 +320,21 @@ void npc_cn20k_dft_rules_free(struct rvu *rvu, u16 pcifunc);
int npc_cn20k_dft_rules_idx_get(struct rvu *rvu, u16 pcifunc, u16 *bcast,
u16 *mcast, u16 *promisc, u16 *ucast);
-void npc_cn20k_config_mcam_entry(struct rvu *rvu, int blkaddr, int index,
- u8 intf, struct cn20k_mcam_entry *entry,
- bool enable, u8 hw_prio, u8 req_kw_type);
-void npc_cn20k_enable_mcam_entry(struct rvu *rvu, int blkaddr,
- int index, bool enable);
-void npc_cn20k_copy_mcam_entry(struct rvu *rvu, int blkaddr,
- u16 src, u16 dest);
-void npc_cn20k_read_mcam_entry(struct rvu *rvu, int blkaddr, u16 index,
- struct cn20k_mcam_entry *entry, u8 *intf,
- u8 *ena, u8 *hw_prio);
-void npc_cn20k_clear_mcam_entry(struct rvu *rvu, int blkaddr,
- int bank, int index);
+int npc_cn20k_config_mcam_entry(struct rvu *rvu, int blkaddr, int index,
+ u8 intf, struct cn20k_mcam_entry *entry,
+ bool enable, u8 hw_prio, u8 req_kw_type);
+int npc_cn20k_enable_mcam_entry(struct rvu *rvu, int blkaddr,
+ int index, bool enable);
+int npc_cn20k_copy_mcam_entry(struct rvu *rvu, int blkaddr,
+ u16 src, u16 dest);
+int npc_cn20k_read_mcam_entry(struct rvu *rvu, int blkaddr, u16 index,
+ struct cn20k_mcam_entry *entry, u8 *intf,
+ u8 *ena, u8 *hw_prio);
+int npc_cn20k_clear_mcam_entry(struct rvu *rvu, int blkaddr, int index);
int npc_mcam_idx_2_key_type(struct rvu *rvu, u16 mcam_idx, u8 *key_type);
u16 npc_cn20k_vidx2idx(u16 index);
u16 npc_cn20k_idx2vidx(u16 idx);
int npc_cn20k_defrag(struct rvu *rvu);
+bool npc_is_cgx_or_lbk(struct rvu *rvu, u16 pcifunc);
#endif /* NPC_CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index ef5b081162eb..f977734ae712 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -3577,6 +3577,9 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
mcam_index = npc_get_nixlf_mcam_index(mcam,
pcifunc & ~RVU_PFVF_FUNC_MASK,
nixlf, type);
+ if (mcam_index < 0)
+ return -EINVAL;
+
err = nix_update_mce_list(rvu, pcifunc, mce_list,
mce_idx, mcam_index, add);
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c2ca5ed1d028..3c814d157ab9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -163,14 +163,35 @@ int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
if (rc)
return -EFAULT;
+ if (is_lbk_vf(rvu, pcifunc)) {
+ if (promisc == USHRT_MAX)
+ return -EINVAL;
+ return promisc;
+ }
+
+ if (is_cgx_vf(rvu, pcifunc)) {
+ if (ucast == USHRT_MAX)
+ return -EINVAL;
+
+ return ucast;
+ }
+
switch (type) {
case NIXLF_BCAST_ENTRY:
+ if (bcast == USHRT_MAX)
+ return -EINVAL;
return bcast;
case NIXLF_ALLMULTI_ENTRY:
+ if (mcast == USHRT_MAX)
+ return -EINVAL;
return mcast;
case NIXLF_PROMISC_ENTRY:
+ if (promisc == USHRT_MAX)
+ return -EINVAL;
return promisc;
case NIXLF_UCAST_ENTRY:
+ if (ucast == USHRT_MAX)
+ return -EINVAL;
return ucast;
default:
return -EINVAL;
@@ -238,10 +259,10 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int actbank = bank;
if (is_cn20k(rvu->pdev)) {
- if (index < 0 || index >= mcam->banksize * mcam->banks)
- return;
-
- return npc_cn20k_enable_mcam_entry(rvu, blkaddr, index, enable);
+ if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, index, enable))
+ dev_err(rvu->dev, "Error to %s mcam %u entry\n",
+ enable ? "enable" : "disable", index);
+ return;
}
index &= (mcam->banksize - 1);
@@ -258,6 +279,13 @@ static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int bank = npc_get_bank(mcam, index);
int actbank = bank;
+ if (is_cn20k(rvu->pdev)) {
+ if (npc_cn20k_clear_mcam_entry(rvu, blkaddr, index))
+ dev_err(rvu->dev, "%s Failed to clear mcam %u\n",
+ __func__, index);
+ return;
+ }
+
index &= (mcam->banksize - 1);
for (; bank < (actbank + mcam->banks_per_entry); bank++) {
rvu_write64(rvu, blkaddr,
@@ -424,6 +452,15 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
NIXLF_UCAST_ENTRY);
+
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: failed to get ucast entry pcifunc:0x%x\n",
+ __func__, pf_func);
+ /* Action 0 is drop */
+ return 0;
+ }
+
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
@@ -589,8 +626,8 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
}
-static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, u16 src, u16 dest)
+static int npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, u16 dest)
{
int dbank = npc_get_bank(mcam, dest);
int sbank = npc_get_bank(mcam, src);
@@ -630,6 +667,7 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+ return 0;
}
u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
@@ -689,6 +727,12 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get ucast entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
/* Don't change the action if entry is already enabled
* Otherwise RSS action may get overwritten.
@@ -744,16 +788,38 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
+ /* In cn20k, default indexes are installed only for CGX mapped
+ * and lbk interfaces
+ */
if (is_cgx_vf(rvu, pcifunc))
index = npc_get_nixlf_mcam_index(mcam,
pcifunc & ~RVU_PFVF_FUNC_MASK,
nixlf, NIXLF_PROMISC_ENTRY);
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get promisc entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
+
/* If the corresponding PF's ucast action is RSS,
* use the same action for promisc also
+ * Note that for LBK interfaces, "index" and "ucast_idx"
+ * will be the same.
*/
- ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
+ if (is_lbk_vf(rvu, pcifunc))
+ ucast_idx = index;
+ else
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (ucast_idx < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get ucast/promisc entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
+
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
@@ -827,6 +893,14 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
+
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get promisc entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
+
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
@@ -867,6 +941,12 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get bcast entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
if (!hw->cap.nix_rx_multicast) {
/* Early silicon doesn't support pkt replication,
@@ -931,12 +1011,25 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY);
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get mcast entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
/* If the corresponding PF's ucast action is RSS,
* use the same action for multicast entry also
*/
ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
+ if (ucast_idx < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get ucast entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
+
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
@@ -1001,6 +1094,13 @@ void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
NIXLF_ALLMULTI_ENTRY);
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get mcast entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
+
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
@@ -1113,8 +1213,12 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
index = mcam_index;
}
- if (index >= mcam->total_entries)
+ if (index < 0 || index >= mcam->total_entries) {
+ dev_err(rvu->dev,
+ "%s: Invalid mcam index, pcifunc=%#x\n",
+ __func__, pcifunc);
return;
+ }
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
@@ -1158,16 +1262,18 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index,
- blkaddr, alg_idx);
+ if (index >= 0)
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index,
+ blkaddr, alg_idx);
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY);
/* If PF's allmulti entry is enabled,
* Set RSS action for that entry as well
*/
- npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index,
- blkaddr, alg_idx);
+ if (index >= 0)
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index,
+ blkaddr, alg_idx);
}
}
@@ -1180,12 +1286,22 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
int index, blkaddr, mce_idx;
struct rvu_pfvf *pfvf;
+ /* multicast pkt replication is not enabled for AF's VFs & SDP links */
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc))
+ return;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
nixlf, type);
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get entry for pcifunc=%#x, type=%u\n",
+ __func__, pcifunc, type);
+ return;
+ }
/* disable MCAM entry when packet replication is not supported by hw */
if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
@@ -1214,6 +1330,10 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
int index, blkaddr;
+ /* only CGX or LBK interfaces have default entries */
+ if (is_cn20k(rvu->pdev) && !npc_is_cgx_or_lbk(rvu, pcifunc))
+ return;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
@@ -1223,6 +1343,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
pfvf->nix_rx_intf)) {
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
+ if (index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get ucast entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
@@ -2504,33 +2630,58 @@ void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc)
{
+ u16 dft_idxs[NPC_DFT_RULE_MAX_ID] = {[0 ... NPC_DFT_RULE_MAX_ID - 1] = USHRT_MAX};
+ bool cn20k_dft_rl;
u16 index, cntr;
int rc;
+ npc_cn20k_dft_rules_idx_get(rvu, pcifunc,
+ &dft_idxs[NPC_DFT_RULE_BCAST_ID],
+ &dft_idxs[NPC_DFT_RULE_MCAST_ID],
+ &dft_idxs[NPC_DFT_RULE_PROMISC_ID],
+ &dft_idxs[NPC_DFT_RULE_UCAST_ID]);
+
/* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
for (index = 0; index < mcam->bmap_entries; index++) {
- if (mcam->entry2pfvf_map[index] == pcifunc) {
+ if (mcam->entry2pfvf_map[index] != pcifunc)
+ continue;
+
+ cn20k_dft_rl = false;
+
+ if (is_cn20k(rvu->pdev)) {
+ if (dft_idxs[NPC_DFT_RULE_BCAST_ID] == index ||
+ dft_idxs[NPC_DFT_RULE_MCAST_ID] == index ||
+ dft_idxs[NPC_DFT_RULE_PROMISC_ID] == index ||
+ dft_idxs[NPC_DFT_RULE_UCAST_ID] == index) {
+ cn20k_dft_rl = true;
+ }
+ }
+
+ /* Disable the entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ if (!cn20k_dft_rl) {
mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
/* Free the entry in bitmap */
npc_mcam_clear_bit(mcam, index);
- /* Disable the entry */
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
-
- /* Update entry2counter mapping */
- cntr = mcam->entry2cntr_map[index];
- if (cntr != NPC_MCAM_INVALID_MAP)
- npc_unmap_mcam_entry_and_cntr(rvu, mcam,
- blkaddr, index,
- cntr);
mcam->entry2target_pffunc[index] = 0x0;
- if (is_cn20k(rvu->pdev)) {
- rc = npc_cn20k_idx_free(rvu, &index, 1);
- if (rc)
- dev_err(rvu->dev,
- "Failed to free mcam idx=%u pcifunc=%#x\n",
- index, pcifunc);
- }
}
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[index];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+ blkaddr, index,
+ cntr);
+
+ if (!is_cn20k(rvu->pdev) || cn20k_dft_rl)
+ continue;
+
+ rc = npc_cn20k_idx_free(rvu, &index, 1);
+ if (rc)
+ dev_err(rvu->dev,
+ "Failed to free mcam idx=%u pcifunc=%#x\n",
+ index, pcifunc);
}
}
@@ -3266,7 +3417,10 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
/* Copy rule from old entry to new entry */
- npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+ if (npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ break;
+ }
/* Copy counter mapping, if any */
cntr = mcam->entry2cntr_map[old_entry];
@@ -3284,7 +3438,8 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
/* If shift has failed then report the failed index */
if (index != req->shift_count) {
- rc = NPC_MCAM_PERM_DENIED;
+ if (!rc)
+ rc = NPC_MCAM_PERM_DENIED;
rsp->failed_entry_idx = index;
}
@@ -3851,6 +4006,12 @@ int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
/* Read the default ucast entry if there is no pkt steering rule */
index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
NIXLF_UCAST_ENTRY);
+ if (index < 0) {
+ mutex_unlock(&mcam->lock);
+ rc = NIX_AF_ERR_AF_LF_INVALID;
+ goto out;
+ }
+
read_entry:
/* Read the mcam entry */
npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
@@ -3924,6 +4085,12 @@ void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf)
ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
+ if (ucast_idx < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get ucast entry for pcifunc=%#x\n",
+ __func__, pcifunc);
+ return;
+ }
npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index b45798d9fdab..6ae9cdcb608b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1444,7 +1444,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
struct msg_rsp write_rsp;
struct mcam_entry *entry;
bool new = false;
- u16 entry_index;
+ int entry_index;
int err;
installed_features = req->features;
@@ -1477,6 +1477,14 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
if (req->default_rule) {
entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
NIXLF_UCAST_ENTRY);
+
+ if (entry_index < 0) {
+ dev_err(rvu->dev,
+ "%s: Error to get ucast entry for target=%#x\n",
+ __func__, target);
+ return -EINVAL;
+ }
+
enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
}
@@ -1980,13 +1988,15 @@ static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);
- if (is_cn20k(rvu->pdev))
- npc_cn20k_read_mcam_entry(rvu, npcblkaddr, rule->entry,
- cn20k_entry, &intf,
- &enable, &hw_prio);
- else
+ if (is_cn20k(rvu->pdev)) {
+ if (npc_cn20k_read_mcam_entry(rvu, npcblkaddr, rule->entry,
+ cn20k_entry, &intf,
+ &enable, &hw_prio))
+ return -EINVAL;
+ } else {
npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
entry, &intf, &enable);
+ }
npc_update_entry(rvu, NPC_DMAC, &mdata,
ether_addr_to_u64(pfvf->mac_addr), 0,
@@ -2038,8 +2048,12 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
continue;
}
- if (rule->vfvlan_cfg)
- npc_update_dmac_value(rvu, blkaddr, rule, pfvf);
+ if (rule->vfvlan_cfg) {
+ if (npc_update_dmac_value(rvu, blkaddr, rule, pfvf))
+ dev_err(rvu->dev,
+ "Update dmac failed for %u, target=%#x\n",
+ rule->entry, target);
+ }
if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
if (!def_ucast_rule)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
index 6a50b6dec0fa..d9adb993e64d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -1070,29 +1070,37 @@ static struct psp_dev_ops mlx5_psp_ops = {
void mlx5e_psp_unregister(struct mlx5e_priv *priv)
{
- if (!priv->psp || !priv->psp->psp)
+ struct mlx5e_psp *psp = priv->psp;
+
+ if (!psp || !psp->psp)
return;
- psp_dev_unregister(priv->psp->psp);
+ psp_dev_unregister(psp->psp);
+ psp->psp = NULL;
}
void mlx5e_psp_register(struct mlx5e_priv *priv)
{
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_dev *psd;
+
/* FW Caps missing */
if (!priv->psp)
return;
- priv->psp->caps.assoc_drv_spc = sizeof(u32);
- priv->psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
+ psp->caps.assoc_drv_spc = sizeof(u32);
+ psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
if (MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_encrypt) &&
MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_decrypt))
- priv->psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;
+ psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;
- priv->psp->psp = psp_dev_create(priv->netdev, &mlx5_psp_ops,
- &priv->psp->caps, NULL);
- if (IS_ERR(priv->psp->psp))
+ psd = psp_dev_create(priv->netdev, &mlx5_psp_ops, &psp->caps, NULL);
+ if (IS_ERR(psd)) {
mlx5_core_err(priv->mdev, "PSP failed to register due to %pe\n",
- priv->psp->psp);
+ psd);
+ return;
+ }
+ psp->psp = psd;
}
int mlx5e_psp_init(struct mlx5e_priv *priv)
@@ -1131,22 +1139,18 @@ int mlx5e_psp_init(struct mlx5e_priv *priv)
if (!psp)
return -ENOMEM;
- priv->psp = psp;
fs = mlx5e_accel_psp_fs_init(priv);
if (IS_ERR(fs)) {
err = PTR_ERR(fs);
- goto out_err;
+ kfree(psp);
+ return err;
}
psp->fs = fs;
+ priv->psp = psp;
mlx5_core_dbg(priv->mdev, "PSP attached to netdevice\n");
return 0;
-
-out_err:
- priv->psp = NULL;
- kfree(psp);
- return err;
}
void mlx5e_psp_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5a46870c4b74..8f2b3abe0092 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -6023,7 +6023,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (take_rtnl)
rtnl_lock();
- mlx5e_psp_register(priv);
/* update XDP supported features */
mlx5e_set_xdp_feature(priv);
@@ -6036,7 +6035,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
- mlx5e_psp_unregister(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_psp_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
@@ -6160,6 +6158,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_fs_init_l2_addr(priv->fs, netdev);
mlx5e_ipsec_init(priv);
+ mlx5e_psp_register(priv);
err = mlx5e_macsec_init(priv);
if (err)
@@ -6230,6 +6229,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_macsec_cleanup(priv);
+ mlx5e_psp_unregister(priv);
mlx5e_ipsec_cleanup(priv);
}
@@ -6774,9 +6774,11 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return err;
actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
- if (actual_adev)
- return _mlx5e_resume(actual_adev);
- return 0;
+ if (actual_adev) {
+ err = _mlx5e_resume(actual_adev);
+ mlx5_sd_put_adev(actual_adev, adev);
+ }
+ return err;
}
static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
@@ -6815,6 +6817,8 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
err = _mlx5e_suspend(actual_adev, false);
mlx5_sd_cleanup(mdev);
+ if (actual_adev)
+ mlx5_sd_put_adev(actual_adev, adev);
return err;
}
@@ -6912,9 +6916,19 @@ static int mlx5e_probe(struct auxiliary_device *adev,
return err;
actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
- if (actual_adev)
- return _mlx5e_probe(actual_adev);
+ if (actual_adev) {
+ err = _mlx5e_probe(actual_adev);
+ if (err)
+ goto sd_cleanup;
+ mlx5_sd_put_adev(actual_adev, adev);
+ }
return 0;
+
+sd_cleanup:
+ mlx5_sd_cleanup(mdev);
+ if (actual_adev)
+ mlx5_sd_put_adev(actual_adev, adev);
+ return err;
}
static void _mlx5e_remove(struct auxiliary_device *adev)
@@ -6966,6 +6980,8 @@ static void mlx5e_remove(struct auxiliary_device *adev)
_mlx5e_remove(actual_adev);
mlx5_sd_cleanup(mdev);
+ if (actual_adev)
+ mlx5_sd_put_adev(actual_adev, adev);
}
static const struct auxiliary_device_id mlx5e_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index 762c783156b4..6e199161b008 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -18,6 +18,7 @@ struct mlx5_sd {
u8 host_buses;
struct mlx5_devcom_comp_dev *devcom;
struct dentry *dfs;
+ u8 state;
bool primary;
union {
struct { /* primary */
@@ -31,6 +32,11 @@ struct mlx5_sd {
};
};
+enum mlx5_sd_state {
+ MLX5_SD_STATE_DOWN = 0,
+ MLX5_SD_STATE_UP,
+};
+
static int mlx5_sd_get_host_buses(struct mlx5_core_dev *dev)
{
struct mlx5_sd *sd = mlx5_get_sd(dev);
@@ -270,9 +276,6 @@ static void sd_unregister(struct mlx5_core_dev *dev)
{
struct mlx5_sd *sd = mlx5_get_sd(dev);
- mlx5_devcom_comp_lock(sd->devcom);
- mlx5_devcom_comp_set_ready(sd->devcom, false);
- mlx5_devcom_comp_unlock(sd->devcom);
mlx5_devcom_unregister_component(sd->devcom);
}
@@ -426,6 +429,7 @@ int mlx5_sd_init(struct mlx5_core_dev *dev)
struct mlx5_core_dev *primary, *pos, *to;
struct mlx5_sd *sd = mlx5_get_sd(dev);
u8 alias_key[ACCESS_KEY_LEN];
+ struct mlx5_sd *primary_sd;
int err, i;
err = sd_init(dev);
@@ -440,10 +444,17 @@ int mlx5_sd_init(struct mlx5_core_dev *dev)
if (err)
goto err_sd_cleanup;
+ mlx5_devcom_comp_lock(sd->devcom);
if (!mlx5_devcom_comp_is_ready(sd->devcom))
- return 0;
+ goto out;
primary = mlx5_sd_get_primary(dev);
+ if (!primary)
+ goto out;
+
+ primary_sd = mlx5_get_sd(primary);
+ if (primary_sd->state != MLX5_SD_STATE_DOWN)
+ goto out;
for (i = 0; i < ACCESS_KEY_LEN; i++)
alias_key[i] = get_random_u8();
@@ -452,9 +463,13 @@ int mlx5_sd_init(struct mlx5_core_dev *dev)
if (err)
goto err_sd_unregister;
- sd->dfs = debugfs_create_dir("multi-pf", mlx5_debugfs_get_dev_root(primary));
- debugfs_create_x32("group_id", 0400, sd->dfs, &sd->group_id);
- debugfs_create_file("primary", 0400, sd->dfs, primary, &dev_fops);
+ primary_sd->dfs =
+ debugfs_create_dir("multi-pf",
+ mlx5_debugfs_get_dev_root(primary));
+ debugfs_create_x32("group_id", 0400, primary_sd->dfs,
+ &primary_sd->group_id);
+ debugfs_create_file("primary", 0400, primary_sd->dfs, primary,
+ &dev_fops);
mlx5_sd_for_each_secondary(i, primary, pos) {
char name[32];
@@ -464,7 +479,8 @@ int mlx5_sd_init(struct mlx5_core_dev *dev)
goto err_unset_secondaries;
snprintf(name, sizeof(name), "secondary_%d", i - 1);
- debugfs_create_file(name, 0400, sd->dfs, pos, &dev_fops);
+ debugfs_create_file(name, 0400, primary_sd->dfs, pos,
+ &dev_fops);
}
@@ -472,6 +488,9 @@ int mlx5_sd_init(struct mlx5_core_dev *dev)
sd->group_id, mlx5_devcom_comp_get_size(sd->devcom));
sd_print_group(primary);
+ primary_sd->state = MLX5_SD_STATE_UP;
+out:
+ mlx5_devcom_comp_unlock(sd->devcom);
return 0;
err_unset_secondaries:
@@ -479,8 +498,18 @@ err_unset_secondaries:
mlx5_sd_for_each_secondary_to(i, primary, to, pos)
sd_cmd_unset_secondary(pos);
sd_cmd_unset_primary(primary);
- debugfs_remove_recursive(sd->dfs);
+ debugfs_remove_recursive(primary_sd->dfs);
+ primary_sd->dfs = NULL;
err_sd_unregister:
+ mlx5_sd_for_each_secondary(i, primary, pos) {
+ struct mlx5_sd *peer_sd = mlx5_get_sd(pos);
+
+ primary_sd->secondaries[i - 1] = NULL;
+ peer_sd->primary_dev = NULL;
+ }
+ primary_sd->primary = false;
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+ mlx5_devcom_comp_unlock(sd->devcom);
sd_unregister(dev);
err_sd_cleanup:
sd_cleanup(dev);
@@ -491,42 +520,97 @@ void mlx5_sd_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_sd *sd = mlx5_get_sd(dev);
struct mlx5_core_dev *primary, *pos;
+ struct mlx5_sd *primary_sd;
int i;
if (!sd)
return;
+ mlx5_devcom_comp_lock(sd->devcom);
if (!mlx5_devcom_comp_is_ready(sd->devcom))
- goto out;
+ goto out_unlock;
primary = mlx5_sd_get_primary(dev);
+ if (!primary)
+ goto out_ready_false;
+
+ primary_sd = mlx5_get_sd(primary);
+ if (primary_sd->state != MLX5_SD_STATE_UP)
+ goto out_clear_peers;
+
mlx5_sd_for_each_secondary(i, primary, pos)
sd_cmd_unset_secondary(pos);
sd_cmd_unset_primary(primary);
- debugfs_remove_recursive(sd->dfs);
+ debugfs_remove_recursive(primary_sd->dfs);
+ primary_sd->dfs = NULL;
sd_info(primary, "group id %#x, uncombined\n", sd->group_id);
-out:
+ primary_sd->state = MLX5_SD_STATE_DOWN;
+out_clear_peers:
+ mlx5_sd_for_each_secondary(i, primary, pos) {
+ struct mlx5_sd *peer_sd = mlx5_get_sd(pos);
+
+ primary_sd->secondaries[i - 1] = NULL;
+ peer_sd->primary_dev = NULL;
+ }
+ primary_sd->primary = false;
+out_ready_false:
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+out_unlock:
+ mlx5_devcom_comp_unlock(sd->devcom);
sd_unregister(dev);
sd_cleanup(dev);
}
+/* Lock order:
+ * primary: actual_adev_lock -> SD devcom comp lock
+ * secondary: SD devcom comp lock -> (drop) -> actual_adev_lock
+ * The two locks are never held together, so no ABBA.
+ */
struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
struct auxiliary_device *adev,
int idx)
{
struct mlx5_sd *sd = mlx5_get_sd(dev);
struct mlx5_core_dev *primary;
+ struct mlx5_adev *primary_adev;
if (!sd)
return adev;
- if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ mlx5_devcom_comp_lock(sd->devcom);
+ if (!mlx5_devcom_comp_is_ready(sd->devcom)) {
+ mlx5_devcom_comp_unlock(sd->devcom);
return NULL;
+ }
primary = mlx5_sd_get_primary(dev);
- if (dev == primary)
+ if (!primary || dev == primary) {
+ mlx5_devcom_comp_unlock(sd->devcom);
return adev;
+ }
+
+ primary_adev = primary->priv.adev[idx];
+ get_device(&primary_adev->adev.dev);
+ mlx5_devcom_comp_unlock(sd->devcom);
+
+ device_lock(&primary_adev->adev.dev);
+ /* Primary may have completed remove between dropping devcom and
+ * acquiring device_lock; recheck.
+ */
+ if (!mlx5_devcom_comp_is_ready(sd->devcom)) {
+ device_unlock(&primary_adev->adev.dev);
+ put_device(&primary_adev->adev.dev);
+ return NULL;
+ }
+ return &primary_adev->adev;
+}
- return &primary->priv.adev[idx]->adev;
+void mlx5_sd_put_adev(struct auxiliary_device *actual_adev,
+ struct auxiliary_device *adev)
+{
+ if (actual_adev != adev) {
+ device_unlock(&actual_adev->dev);
+ put_device(&actual_adev->dev);
+ }
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
index 137efaf9aabc..9bfd5b9756b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
@@ -15,6 +15,8 @@ struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int c
struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
struct auxiliary_device *adev,
int idx);
+void mlx5_sd_put_adev(struct auxiliary_device *actual_adev,
+ struct auxiliary_device *adev);
int mlx5_sd_init(struct mlx5_core_dev *dev);
void mlx5_sd_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index c406a3b56b37..4dea2bb58d2f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -826,7 +826,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netif_tx_stop_all_queues(netdev);
if (fbnic_phylink_create(netdev)) {
- fbnic_netdev_free(fbd);
+ free_netdev(netdev);
+ fbd->netdev = NULL;
return NULL;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 6a745bb71b5c..eb57b86fbe22 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -31,11 +31,11 @@ enum spx5_target_chiptype {
SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */
SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */
SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */
- SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
- SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
- SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
- SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
- SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+ SPX5_TARGET_CT_7546TSN = 0x0546, /* SparX-5-64i Industrial */
+ SPX5_TARGET_CT_7549TSN = 0x0549, /* SparX-5-90i Industrial */
+ SPX5_TARGET_CT_7552TSN = 0x0552, /* SparX-5-128i Industrial */
+ SPX5_TARGET_CT_7556TSN = 0x0556, /* SparX-5-160i Industrial */
+ SPX5_TARGET_CT_7558TSN = 0x0558, /* SparX-5-200i Industrial */
SPX5_TARGET_CT_LAN9694 = 0x9694, /* lan969x-40 */
SPX5_TARGET_CT_LAN9691VAO = 0x9691, /* lan969x-40-VAO */
SPX5_TARGET_CT_LAN9694TSN = 0x9695, /* lan969x-40-TSN */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 04bc8fffaf96..62c49893de3c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -1128,7 +1128,8 @@ int sparx5_port_init(struct sparx5 *sparx5,
DEV2G5_PCS1G_SD_CFG(port->portno));
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
- conf->portmode == PHY_INTERFACE_MODE_SGMII) {
+ conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+ conf->portmode == PHY_INTERFACE_MODE_1000BASEX) {
err = sparx5_serdes_set(sparx5, port, conf);
if (err)
return err;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 098fbda0d128..d8e816882f02 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -43,8 +43,9 @@ static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
static int mana_gd_init_pf_regs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- void __iomem *sriov_base_va;
+ u64 remaining_barsize;
u64 sriov_base_off;
+ u64 sriov_shm_off;
gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
@@ -73,10 +74,28 @@ static int mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->phys_db_page_base = gc->bar0_pa + gc->db_page_off;
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+ if (sriov_base_off >= gc->bar0_size ||
+ gc->bar0_size - sriov_base_off <
+ GDMA_PF_REG_SHM_OFF + sizeof(u64) ||
+ !IS_ALIGNED(sriov_base_off, sizeof(u64))) {
+ dev_err(gc->dev,
+ "SRIOV base offset 0x%llx out of range or unaligned (BAR0 size 0x%llx)\n",
+ sriov_base_off, (u64)gc->bar0_size);
+ return -EPROTO;
+ }
- sriov_base_va = gc->bar0_va + sriov_base_off;
- gc->shm_base = sriov_base_va +
- mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+ remaining_barsize = gc->bar0_size - sriov_base_off;
+ sriov_shm_off = mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+ if (sriov_shm_off >= remaining_barsize ||
+ remaining_barsize - sriov_shm_off < SMC_APERTURE_SIZE ||
+ !IS_ALIGNED(sriov_shm_off, sizeof(u32))) {
+ dev_err(gc->dev,
+ "SRIOV SHM offset 0x%llx out of range or unaligned (BAR0 size 0x%llx)\n",
+ sriov_shm_off, (u64)gc->bar0_size);
+ return -EPROTO;
+ }
+
+ gc->shm_base = gc->bar0_va + sriov_base_off + sriov_shm_off;
return 0;
}
@@ -84,6 +103,7 @@ static int mana_gd_init_pf_regs(struct pci_dev *pdev)
static int mana_gd_init_vf_regs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ u64 shm_off;
gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
@@ -111,7 +131,17 @@ static int mana_gd_init_vf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va + gc->db_page_off;
gc->phys_db_page_base = gc->bar0_pa + gc->db_page_off;
- gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
+ shm_off = mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
+ if (shm_off >= gc->bar0_size ||
+ gc->bar0_size - shm_off < SMC_APERTURE_SIZE ||
+ !IS_ALIGNED(shm_off, sizeof(u32))) {
+ dev_err(gc->dev,
+ "SHM offset 0x%llx out of range or unaligned (BAR0 size 0x%llx)\n",
+ shm_off, (u64)gc->bar0_size);
+ return -EPROTO;
+ }
+
+ gc->shm_base = gc->bar0_va + shm_off;
return 0;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a654b3699c4c..9afc786b297a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2520,9 +2520,12 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
napi_disable_locked(napi);
netif_napi_del_locked(napi);
}
- xdp_rxq_info_unreg(&rxq->xdp_rxq);
- mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ if (rxq->rxobj != INVALID_MANA_HANDLE)
+ mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
mana_deinit_cq(apc, &rxq->rx_cq);
@@ -2796,9 +2799,6 @@ out:
mana_destroy_rxq(apc, rxq, false);
- if (cq)
- mana_deinit_cq(apc, cq);
-
return NULL;
}
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
index 0f1679ebad96..d21b5db06e50 100644
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -61,11 +61,6 @@ union smc_proto_hdr {
};
}; /* HW DATA */
-#define SMC_APERTURE_BITS 256
-#define SMC_BASIC_UNIT (sizeof(u32))
-#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8))
-#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1)
-
static int mana_smc_poll_register(void __iomem *base, bool reset)
{
void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT;
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 03a2669f0518..ee8381b60b8d 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -797,11 +797,11 @@ static int rtsn_mdio_alloc(struct rtsn_private *priv)
/* Enter config mode before registering the MDIO bus */
ret = rtsn_reset(priv);
if (ret)
- goto out_free_bus;
+ goto out_put_node;
ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
if (ret)
- goto out_free_bus;
+ goto out_put_node;
rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
@@ -824,6 +824,8 @@ static int rtsn_mdio_alloc(struct rtsn_private *priv)
return 0;
+out_put_node:
+ of_node_put(mdio_node);
out_free_bus:
mdiobus_free(mii);
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-nuvoton.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-nuvoton.c
index e2240b68ad98..2ab6ecac6422 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-nuvoton.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-nuvoton.c
@@ -100,6 +100,8 @@ static int nvt_gmac_probe(struct platform_device *pdev)
if (!priv)
return dev_err_probe(dev, -ENOMEM, "Failed to allocate private data\n");
+ priv->dev = dev;
+
priv->regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, "nuvoton,sys",
1, &priv->macid);
if (IS_ERR(priv->regmap))
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index d3772d01e00b..2451f6b20b11 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -2480,8 +2480,11 @@ int wx_sw_init(struct wx *wx)
wx->oem_svid = pdev->subsystem_vendor;
wx->oem_ssid = pdev->subsystem_device;
wx->bus.device = PCI_SLOT(pdev->devfn);
- wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID,
- rd32(wx, WX_CFG_PORT_ST));
+ if (pdev->is_virtfn)
+ wx->bus.func = PCI_FUNC(pdev->devfn);
+ else
+ wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID,
+ rd32(wx, WX_CFG_PORT_ST));
if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN ||
pdev->is_virtfn) {
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
index 29cdbed2e5ec..94ff8f5f0b4c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
@@ -99,8 +99,8 @@ int wx_request_msix_irqs_vf(struct wx *wx)
}
}
- err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
- NULL, IRQF_ONESHOT, netdev->name, wx);
+ err = request_irq(wx->msix_entry->vector, wx_msix_misc_vf,
+ 0, netdev->name, wx);
if (err) {
wx_err(wx, "request_irq for msix_other failed: %d\n", err);
goto free_queue_irqs;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index a05af192caf3..a750768912b5 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -1182,7 +1182,8 @@ void nsim_destroy(struct netdevsim *ns)
unregister_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
&ns->nn);
- nsim_psp_uninit(ns);
+ if (nsim_dev_port_is_pf(ns->nsim_dev_port))
+ nsim_psp_uninit(ns);
rtnl_lock();
peer = rtnl_dereference(ns->peer);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 7e129dddbbe7..d909c4160ea1 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -120,7 +120,9 @@ struct netdevsim {
u64_stats_t tx_packets;
u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
- struct psp_dev *dev;
+ struct psp_dev __rcu *dev;
+ struct dentry *rereg;
+ struct mutex rereg_lock;
u32 spi;
u32 assoc_cnt;
} psp;
diff --git a/drivers/net/netdevsim/psp.c b/drivers/net/netdevsim/psp.c
index 0b4d717253b0..6936ecb8173e 100644
--- a/drivers/net/netdevsim/psp.c
+++ b/drivers/net/netdevsim/psp.c
@@ -19,6 +19,7 @@ nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
struct netdevsim *peer_ns, struct skb_ext **psp_ext)
{
enum skb_drop_reason rc = 0;
+ struct psp_dev *peer_psd;
struct psp_assoc *pas;
struct net *net;
void **ptr;
@@ -48,7 +49,8 @@ nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
}
/* Now pretend we just received this frame */
- if (peer_ns->psp.dev->config.versions & (1 << pas->version)) {
+ peer_psd = rcu_dereference(peer_ns->psp.dev);
+ if (peer_psd && peer_psd->config.versions & (1 << pas->version)) {
bool strip_icv = false;
u8 generation;
@@ -61,8 +63,7 @@ nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
skb_ext_reset(skb);
skb->mac_len = ETH_HLEN;
- if (psp_dev_rcv(skb, peer_ns->psp.dev->id, generation,
- strip_icv)) {
+ if (psp_dev_rcv(skb, peer_psd->id, generation, strip_icv)) {
rc = SKB_DROP_REASON_PSP_OUTPUT;
goto out_unlock;
}
@@ -209,26 +210,50 @@ static struct psp_dev_caps nsim_psp_caps = {
.assoc_drv_spc = sizeof(void *),
};
-void nsim_psp_uninit(struct netdevsim *ns)
+static void __nsim_psp_uninit(struct netdevsim *ns, bool teardown)
{
- if (!IS_ERR(ns->psp.dev))
- psp_dev_unregister(ns->psp.dev);
+ struct psp_dev *psd;
+
+ psd = rcu_dereference_protected(ns->psp.dev,
+ teardown ||
+ lockdep_is_held(&ns->psp.rereg_lock));
+ if (psd) {
+ rcu_assign_pointer(ns->psp.dev, NULL);
+ synchronize_rcu();
+ psp_dev_unregister(psd);
+ }
WARN_ON(ns->psp.assoc_cnt);
}
+void nsim_psp_uninit(struct netdevsim *ns)
+{
+ debugfs_remove(ns->psp.rereg);
+ mutex_destroy(&ns->psp.rereg_lock);
+ __nsim_psp_uninit(ns, true);
+}
+
static ssize_t
nsim_psp_rereg_write(struct file *file, const char __user *data, size_t count,
loff_t *ppos)
{
struct netdevsim *ns = file->private_data;
- int err;
+ struct psp_dev *psd;
+ ssize_t ret;
+
+ mutex_lock(&ns->psp.rereg_lock);
+ __nsim_psp_uninit(ns, false);
- nsim_psp_uninit(ns);
+ psd = psp_dev_create(ns->netdev, &nsim_psp_ops, &nsim_psp_caps, ns);
+ if (IS_ERR(psd)) {
+ ret = PTR_ERR(psd);
+ goto out;
+ }
- ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
- &nsim_psp_caps, ns);
- err = PTR_ERR_OR_ZERO(ns->psp.dev);
- return err ?: count;
+ rcu_assign_pointer(ns->psp.dev, psd);
+ ret = count;
+out:
+ mutex_unlock(&ns->psp.rereg_lock);
+ return ret;
}
static const struct file_operations nsim_psp_rereg_fops = {
@@ -241,14 +266,16 @@ static const struct file_operations nsim_psp_rereg_fops = {
int nsim_psp_init(struct netdevsim *ns)
{
struct dentry *ddir = ns->nsim_dev_port->ddir;
- int err;
+ struct psp_dev *psd;
+
+ psd = psp_dev_create(ns->netdev, &nsim_psp_ops, &nsim_psp_caps, ns);
+ if (IS_ERR(psd))
+ return PTR_ERR(psd);
- ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
- &nsim_psp_caps, ns);
- err = PTR_ERR_OR_ZERO(ns->psp.dev);
- if (err)
- return err;
+ rcu_assign_pointer(ns->psp.dev, psd);
- debugfs_create_file("psp_rereg", 0200, ddir, ns, &nsim_psp_rereg_fops);
+ mutex_init(&ns->psp.rereg_lock);
+ ns->psp.rereg = debugfs_create_file("psp_rereg", 0200, ddir, ns,
+ &nsim_psp_rereg_fops);
return 0;
}
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
index db43a1f8a07a..22c555dd962e 100644
--- a/drivers/net/ovpn/io.c
+++ b/drivers/net/ovpn/io.c
@@ -85,17 +85,24 @@ static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
skb_scrub_packet(skb, true);
/* network header reset in ovpn_decrypt_post() */
+ skb_reset_mac_header(skb);
skb_reset_transport_header(skb);
skb_reset_inner_headers(skb);
/* cause packet to be "received" by the interface */
pkt_len = skb->len;
+ /* we may get here in process context in case of TCP connections,
+ * therefore we have to disable BHs to ensure gro_cells_receive()
+ * and dev_dstats_rx_add() do not get corrupted or enter deadlock
+ */
+ local_bh_disable();
ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
if (likely(ret == NET_RX_SUCCESS)) {
/* update RX stats with the size of decrypted packet */
ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
}
+ local_bh_enable();
}
void ovpn_decrypt_post(void *data, int ret)
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 5198d66dbbc0..b64beade8dd9 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -563,6 +563,15 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
}
EXPORT_SYMBOL_GPL(bcm_phy_get_stats);
+void bcm_phy_update_stats_shadow(struct phy_device *phydev, u64 *shadow)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
+ bcm_phy_get_stat(phydev, shadow, i);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_update_stats_shadow);
+
void bcm_phy_r_rc_cal_reset(struct phy_device *phydev)
{
/* Reset R_CAL/RC_CAL Engine */
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index bceddbc860eb..bba94ce96195 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -85,6 +85,7 @@ int bcm_phy_get_sset_count(struct phy_device *phydev);
void bcm_phy_get_strings(struct phy_device *phydev, u8 *data);
void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
struct ethtool_stats *stats, u64 *data);
+void bcm_phy_update_stats_shadow(struct phy_device *phydev, u64 *shadow);
void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
int bcm_phy_enable_jumbo(struct phy_device *phydev);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 00e8fa14aa77..71a163f62c0e 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -807,6 +807,17 @@ static void bcm7xxx_28nm_get_phy_stats(struct phy_device *phydev,
bcm_phy_get_stats(phydev, priv->stats, stats, data);
}
+static int bcm7xxx_28nm_suspend(struct phy_device *phydev)
+{
+ struct bcm7xxx_phy_priv *priv = phydev->priv;
+
+ mutex_lock(&phydev->lock);
+ bcm_phy_update_stats_shadow(phydev, priv->stats);
+ mutex_unlock(&phydev->lock);
+
+ return genphy_suspend(phydev);
+}
+
static int bcm7xxx_28nm_probe(struct phy_device *phydev)
{
struct bcm7xxx_phy_priv *priv;
@@ -849,6 +860,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_config_init, \
.resume = bcm7xxx_28nm_resume, \
+ .suspend = bcm7xxx_28nm_suspend, \
.get_tunable = bcm7xxx_28nm_get_tunable, \
.set_tunable = bcm7xxx_28nm_set_tunable, \
.get_sset_count = bcm_phy_get_sset_count, \
@@ -866,6 +878,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_ephy_config_init, \
.resume = bcm7xxx_28nm_ephy_resume, \
+ .suspend = bcm7xxx_28nm_suspend, \
.get_sset_count = bcm_phy_get_sset_count, \
.get_strings = bcm_phy_get_strings, \
.get_stats = bcm7xxx_28nm_get_phy_stats, \
@@ -902,6 +915,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.config_aneg = genphy_config_aneg, \
.read_status = genphy_read_status, \
.resume = bcm7xxx_16nm_ephy_resume, \
+ .suspend = bcm7xxx_28nm_suspend, \
}
static struct phy_driver bcm7xxx_driver[] = {
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index bf0c6a04481e..d1a4edb34ad2 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -592,8 +592,13 @@ static int bcm54xx_set_wakeup_irq(struct phy_device *phydev, bool state)
static int bcm54xx_suspend(struct phy_device *phydev)
{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
int ret = 0;
+ mutex_lock(&phydev->lock);
+ bcm_phy_update_stats_shadow(phydev, priv->stats);
+ mutex_unlock(&phydev->lock);
+
bcm54xx_ptp_stop(phydev);
/* Acknowledge any Wake-on-LAN interrupt prior to suspend */
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2aa1dedd21b8..e211a523c258 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -4548,6 +4548,13 @@ static int lan8814_config_init(struct phy_device *phydev)
struct kszphy_priv *lan8814 = phydev->priv;
int ret;
+ if (phy_package_init_once(phydev))
+ /* Reset the PHY */
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
+
/* Based on the interface type select how the advertise ability is
* encoded, to set as SGMII or as USGMII.
*/
@@ -4655,13 +4662,7 @@ static int lan8814_probe(struct phy_device *phydev)
priv->is_ptp_available = err == LAN8814_REV_LAN8814 ||
err == LAN8814_REV_LAN8818;
- if (phy_package_init_once(phydev)) {
- /* Reset the PHY */
- lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
- LAN8814_QSGMII_SOFT_RESET,
- LAN8814_QSGMII_SOFT_RESET_BIT,
- LAN8814_QSGMII_SOFT_RESET_BIT);
-
+ if (phy_package_probe_once(phydev)) {
err = lan8814_release_coma_mode(phydev);
if (err)
return err;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index df0bcfedddbc..293ef80c4e30 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -756,6 +756,7 @@ static void ax88772_mac_link_down(struct phylink_config *config,
struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
asix_write_medium_mode(dev, 0, 0);
+ usbnet_link_change(dev, false, false);
}
static void ax88772_mac_link_up(struct phylink_config *config,
@@ -786,6 +787,7 @@ static void ax88772_mac_link_up(struct phylink_config *config,
m |= AX_MEDIUM_RFC;
asix_write_medium_mode(dev, m, 0);
+ usbnet_link_change(dev, true, false);
}
static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index bb9929727eb9..0223a172851e 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -2012,6 +2012,14 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&apple_private_interface_info,
},
+ /* Mac */
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x1905, 0),
+ .driver_info = (unsigned long)&apple_private_interface_info,
+ },
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x1905, 2),
+ .driver_info = (unsigned long)&apple_private_interface_info,
+ },
+
/* Ericsson MBM devices like F5521gw */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7337bf1b7d6a..1ace1d2398c9 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10138,6 +10138,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
{ USB_DEVICE(VENDOR_ID_TRENDNET, 0xe02b) },
+ { USB_DEVICE(VENDOR_ID_TRENDNET, 0xe02c) },
{}
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e35df717e65e..0cfb19b760dd 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -972,7 +972,8 @@ static int veth_poll(struct napi_struct *napi, int budget)
/* NAPI functions as RCU section */
peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
- peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
+ peer_txq = (peer_dev && queue_idx < peer_dev->real_num_tx_queues) ?
+ netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
xdp_set_return_frame_no_direct();
done = veth_xdp_rcv(rq, budget, &bq, &stats);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 3bd57527b1be..15bfb78381d4 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -773,11 +773,6 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
kfree(priv->tx_skbuff);
priv->tx_skbuff = NULL;
- if (priv->uf_regs) {
- iounmap(priv->uf_regs);
- priv->uf_regs = NULL;
- }
-
if (priv->uccf) {
ucc_fast_free(priv->uccf);
priv->uccf = NULL;
@@ -1255,12 +1250,12 @@ static void ucc_hdlc_remove(struct platform_device *pdev)
uhdlc_memclean(priv);
- if (priv->utdm->si_regs) {
+ if (priv->utdm && priv->utdm->si_regs) {
iounmap(priv->utdm->si_regs);
priv->utdm->si_regs = NULL;
}
- if (priv->utdm->siram) {
+ if (priv->utdm && priv->utdm->siram) {
iounmap(priv->utdm->siram);
priv->utdm->siram = NULL;
}
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 876aed765833..efb9f022d8c6 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -46,6 +46,7 @@ config ATH10K_SNOC
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_SMEM
depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
+ select POWER_SEQUENCING
select QCOM_SCM
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 2519e2400d58..980a12fb2c6e 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1838,10 +1838,22 @@ static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab
return ag;
}
+static void ath12k_core_free_wsi_info(struct ath12k_hw_group *ag)
+{
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ of_node_put(ag->wsi_node[i]);
+ ag->wsi_node[i] = NULL;
+ }
+ ag->num_devices = 0;
+}
+
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
mutex_lock(&ath12k_hw_group_mutex);
+ ath12k_core_free_wsi_info(ag);
list_del(&ag->list);
kfree(ag);
@@ -1867,52 +1879,59 @@ static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_bas
static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
struct ath12k_base *ab)
{
- struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
- struct device_node *tx_endpoint, *next_rx_endpoint;
- int device_count = 0;
-
- next_wsi_dev = wsi_dev;
+ struct device_node *next_wsi_dev;
+ int device_count = 0, ret = 0;
+ struct device_node *wsi_dev;
- if (!next_wsi_dev)
+ wsi_dev = of_node_get(ab->dev->of_node);
+ if (!wsi_dev)
return -ENODEV;
do {
- ag->wsi_node[device_count] = next_wsi_dev;
+ if (device_count >= ATH12K_MAX_DEVICES) {
+ ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
+ device_count, ATH12K_MAX_DEVICES);
+ ret = -EINVAL;
+ break;
+ }
+
+ ag->wsi_node[device_count++] = of_node_get(wsi_dev);
- tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
+ struct device_node *tx_endpoint __free(device_node) =
+ of_graph_get_endpoint_by_regs(wsi_dev, 0, -1);
if (!tx_endpoint) {
- of_node_put(next_wsi_dev);
- return -ENODEV;
+ ret = -ENODEV;
+ break;
}
- next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
+ struct device_node *next_rx_endpoint __free(device_node) =
+ of_graph_get_remote_endpoint(tx_endpoint);
if (!next_rx_endpoint) {
- of_node_put(next_wsi_dev);
- of_node_put(tx_endpoint);
- return -ENODEV;
+ ret = -ENODEV;
+ break;
}
- of_node_put(tx_endpoint);
- of_node_put(next_wsi_dev);
-
next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
if (!next_wsi_dev) {
- of_node_put(next_rx_endpoint);
- return -ENODEV;
+ ret = -ENODEV;
+ break;
}
- of_node_put(next_rx_endpoint);
+ of_node_put(wsi_dev);
+ wsi_dev = next_wsi_dev;
+ } while (ab->dev->of_node != wsi_dev);
- device_count++;
- if (device_count > ATH12K_MAX_DEVICES) {
- ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
- device_count, ATH12K_MAX_DEVICES);
- of_node_put(next_wsi_dev);
- return -EINVAL;
+ if (ret) {
+ while (--device_count >= 0) {
+ of_node_put(ag->wsi_node[device_count]);
+ ag->wsi_node[device_count] = NULL;
}
- } while (wsi_dev != next_wsi_dev);
- of_node_put(next_wsi_dev);
+ of_node_put(wsi_dev);
+ return ret;
+ }
+
+ of_node_put(wsi_dev);
ag->num_devices = device_count;
return 0;
@@ -1983,9 +2002,9 @@ static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *a
ath12k_core_get_wsi_index(ag, ab)) {
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"unable to get wsi info from dt, grouping single device");
+ ath12k_core_free_wsi_info(ag);
ag->id = ATH12K_INVALID_GROUP_ID;
ag->num_devices = 1;
- memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
wsi->index = 0;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 250459facff3..b108ccd0f637 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -565,6 +565,9 @@ static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
lockdep_assert_held(&dp->dp_lock);
+ if (!peer->primary_link)
+ return 0;
+
elem = kzalloc_obj(*elem, GFP_ATOMIC);
if (!elem)
return -ENOMEM;
@@ -1337,7 +1340,7 @@ void ath12k_dp_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, struct napi_struc
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
- peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rx_info->peer_id);
+ peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rxcb->peer_id);
pubsta = peer ? peer->sta : NULL;
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index fbdfe6424fd7..df2334f3bad6 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -788,7 +788,7 @@ struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
/* To use the arvif returned, caller must have held rcu read lock.
*/
- WARN_ON(!rcu_read_lock_any_held());
+ lockdep_assert_in_rcu_read_lock();
arvif_iter.vdev_id = vdev_id;
arvif_iter.ar = ar;
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
index 59589748f1a8..19ebcd1d8eb2 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.c
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -123,7 +123,7 @@ static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ath12k_p2p_noa_arg *arg = data;
struct ath12k_link_vif *arvif;
- WARN_ON(!rcu_read_lock_any_held());
+ lockdep_assert_in_rcu_read_lock();
arvif = &ahvif->deflink;
if (!arvif->is_created || arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
return;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 65a05a9520ff..b5e904a55aea 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -9778,7 +9778,7 @@ static void
ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
- struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info;
+ struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info = {};
struct ath12k *ar;
s32 noise_floor;
u32 pdev_id;
@@ -10251,7 +10251,7 @@ int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_a
{
struct wmi_hw_data_filter_cmd *cmd;
struct sk_buff *skb;
- int len;
+ int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
@@ -10275,7 +10275,13 @@ int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_a
"wmi hw data filter enable %d filter_bitmap 0x%x\n",
arg->enable, arg->hw_filter_bitmap);
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_HW_DATA_FILTER_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
@@ -10283,6 +10289,7 @@ int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
struct wmi_wow_host_wakeup_cmd *cmd;
struct sk_buff *skb;
size_t len;
+ int ret;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
@@ -10295,14 +10302,20 @@ int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_wow_enable(struct ath12k *ar)
{
struct wmi_wow_enable_cmd *cmd;
struct sk_buff *skb;
- int len;
+ int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
@@ -10317,7 +10330,13 @@ int ath12k_wmi_wow_enable(struct ath12k *ar)
cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_WOW_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
@@ -10327,6 +10346,7 @@ int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
struct wmi_wow_add_del_event_cmd *cmd;
struct sk_buff *skb;
size_t len;
+ int ret;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
@@ -10343,7 +10363,13 @@ int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
wow_wakeup_event(event), enable, vdev_id);
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
@@ -10356,6 +10382,7 @@ int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
struct sk_buff *skb;
void *ptr;
size_t len;
+ int ret;
len = sizeof(*cmd) +
sizeof(*tlv) + /* array struct */
@@ -10435,7 +10462,13 @@ int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
bitmap->bitmaskbuf, pattern_len);
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_WOW_ADD_WAKE_PATTERN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
@@ -10443,6 +10476,7 @@ int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
struct wmi_wow_del_pattern_cmd *cmd;
struct sk_buff *skb;
size_t len;
+ int ret;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
@@ -10459,7 +10493,13 @@ int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
vdev_id, pattern_id);
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_WOW_DEL_WAKE_PATTERN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
static struct sk_buff *
@@ -10595,6 +10635,7 @@ int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
struct wmi_pno_scan_req_arg *pno_scan)
{
struct sk_buff *skb;
+ int ret;
if (pno_scan->enable)
skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
@@ -10604,7 +10645,13 @@ int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
if (IS_ERR_OR_NULL(skb))
return -ENOMEM;
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
@@ -10717,6 +10764,7 @@ int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
void *buf_ptr;
size_t len;
u8 ns_cnt, ns_ext_tuples = 0;
+ int ret;
ns_cnt = offload->ipv6_count;
@@ -10752,7 +10800,13 @@ int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
if (ns_ext_tuples)
ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_SET_ARP_NS_OFFLOAD_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
@@ -10762,7 +10816,7 @@ int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
struct wmi_gtk_rekey_offload_cmd *cmd;
struct sk_buff *skb;
__le64 replay_ctr;
- int len;
+ int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
@@ -10789,7 +10843,13 @@ int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
arvif->vdev_id, enable);
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_GTK_OFFLOAD_CMDID offload\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
@@ -10797,7 +10857,7 @@ int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
{
struct wmi_gtk_rekey_offload_cmd *cmd;
struct sk_buff *skb;
- int len;
+ int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
@@ -10811,7 +10871,13 @@ int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
arvif->vdev_id);
- return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_GTK_OFFLOAD_CMDID getinfo\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_sta_keepalive(struct ath12k *ar,
@@ -10822,6 +10888,7 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar,
struct wmi_sta_keepalive_cmd *cmd;
struct sk_buff *skb;
size_t len;
+ int ret;
len = sizeof(*cmd) + sizeof(*arp);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
@@ -10849,7 +10916,13 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar,
"wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
arg->vdev_id, arg->enabled, arg->method, arg->interval);
- return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_STA_KEEPALIVE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
}
int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 05c9c07591fc..6ca31d4ea437 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1738,7 +1738,8 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
}
info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
- info->status.rates[ts->ts_final_idx + 1].idx = -1;
+ if (ts->ts_final_idx + 1 < IEEE80211_TX_MAX_RATES)
+ info->status.rates[ts->ts_final_idx + 1].idx = -1;
if (unlikely(ts->ts_status)) {
ah->stats.ack_fail++;
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 7651b1bdb592..f0b082596637 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -702,7 +702,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
* key index, but the ucode passed it slightly different.
*/
keyidx = b43_kidx_to_raw(dev, keyidx);
- B43_WARN_ON(keyidx >= ARRAY_SIZE(dev->key));
+ if (B43_WARN_ON(keyidx >= ARRAY_SIZE(dev->key)))
+ goto drop;
if (dev->key[keyidx].algorithm != B43_SEC_ALGO_NONE) {
wlhdr_len = ieee80211_hdrlen(fctl);
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index efd63f4ce74f..ee199d4eaf03 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -476,7 +476,8 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
* key index, but the ucode passed it slightly different.
*/
keyidx = b43legacy_kidx_to_raw(dev, keyidx);
- B43legacy_WARN_ON(keyidx >= dev->max_nr_keys);
+ if (B43legacy_WARN_ON(keyidx >= dev->max_nr_keys))
+ goto drop;
if (dev->key[keyidx].algorithm != B43legacy_SEC_ALGO_NONE) {
/* Remove PROTECTED flag to mark it as decrypted. */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 30f6fcb68632..8fb595733b9c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2476,8 +2476,9 @@ static void brcmf_sdio_bus_stop(struct device *dev)
brcmf_dbg(TRACE, "Enter\n");
if (bus->watchdog_tsk) {
+ get_task_struct(bus->watchdog_tsk);
send_sig(SIGTERM, bus->watchdog_tsk, 1);
- kthread_stop(bus->watchdog_tsk);
+ kthread_stop_put(bus->watchdog_tsk);
bus->watchdog_tsk = NULL;
}
@@ -4567,8 +4568,9 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
if (bus) {
/* Stop watchdog task */
if (bus->watchdog_tsk) {
+ get_task_struct(bus->watchdog_tsk);
send_sig(SIGTERM, bus->watchdog_tsk, 1);
- kthread_stop(bus->watchdog_tsk);
+ kthread_stop_put(bus->watchdog_tsk);
bus->watchdog_tsk = NULL;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 4fae0e335136..5cc0c5cac257 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -310,6 +310,7 @@ static void if_usb_disconnect(struct usb_interface *intf)
struct lbs_private *priv = cardp->priv;
cardp->surprise_removed = 1;
+ wake_up(&cardp->fw_wq);
if (priv) {
lbs_stop_card(priv);
@@ -633,9 +634,10 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
unsigned long flags;
u8 i;
- if (recvlength > LBS_CMD_BUFFER_SIZE) {
+ if (recvlength < MESSAGE_HEADER_LEN ||
+ recvlength > LBS_CMD_BUFFER_SIZE) {
lbs_deb_usbd(&cardp->udev->dev,
- "The receive buffer is too large\n");
+ "The receive buffer is invalid: %d\n", recvlength);
kfree_skb(skb);
return;
}
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index 591602beeec6..3cdf9ded876d 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -70,12 +70,11 @@ static inline int rsi_create_kthread(struct rsi_common *common,
return 0;
}
-static inline int rsi_kill_thread(struct rsi_thread *handle)
+static inline void rsi_kill_thread(struct rsi_thread *handle)
{
atomic_inc(&handle->thread_done);
rsi_set_event(&handle->event);
-
- return kthread_stop(handle->task);
+ wait_for_completion(&handle->completion);
}
void rsi_mac80211_detach(struct rsi_hw *hw);
diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
index 84eb15d729c7..120f0379f81d 100644
--- a/drivers/net/wireless/st/cw1200/pm.c
+++ b/drivers/net/wireless/st/cw1200/pm.c
@@ -264,14 +264,12 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
wiphy_err(priv->hw->wiphy,
"PM request failed: %d. WoW is disabled.\n", ret);
cw1200_wow_resume(hw);
- mutex_unlock(&priv->conf_mutex);
return -EBUSY;
}
/* Force resume if event is coming from the device. */
if (atomic_read(&priv->bh_rx)) {
cw1200_wow_resume(hw);
- mutex_unlock(&priv->conf_mutex);
return -EAGAIN;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 7968e208dd37..adb29d30c63f 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -457,8 +457,20 @@ static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_inf
offset = sizeof(struct feature_query);
for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
+ size_t remaining = data_length - offset;
+ size_t feat_data_len, feat_total;
+
+ if (remaining < sizeof(*rt_feature))
+ break;
+
rt_feature = data + offset;
- offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);
+ feat_data_len = le32_to_cpu(rt_feature->data_len);
+
+ if (feat_data_len > remaining - sizeof(*rt_feature))
+ break;
+
+ feat_total = sizeof(*rt_feature) + feat_data_len;
+ offset += feat_total;
ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
@@ -468,8 +480,10 @@ static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_inf
if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
return -EINVAL;
- if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
- t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
+ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM) {
+ t7xx_port_enum_msg_handler(ctl->md, rt_feature->data,
+ feat_data_len);
+ }
}
return 0;
diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
index ae632ef96698..f869e4ed9ee9 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
@@ -117,6 +117,7 @@ static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *c
* t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes.
* @md: Modem context.
* @msg: Message.
+ * @msg_len: Length of @msg in bytes.
*
* Used to control create/remove device node.
*
@@ -124,12 +125,18 @@ static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *c
* * 0 - Success.
* * -EFAULT - Message check failure.
*/
-int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg)
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg, size_t msg_len)
{
struct device *dev = &md->t7xx_dev->pdev->dev;
unsigned int version, port_count, i;
struct port_msg *port_msg = msg;
+ if (msg_len < sizeof(*port_msg)) {
+ dev_err(dev, "Port enum msg too short for header: need %zu, have %zu\n",
+ sizeof(*port_msg), msg_len);
+ return -EINVAL;
+ }
+
version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info));
if (version != PORT_ENUM_VER ||
le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN ||
@@ -141,6 +148,13 @@ int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg)
}
port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info));
+
+ if (msg_len < struct_size(port_msg, data, port_count)) {
+ dev_err(dev, "Port enum msg too short: need %zu, have %zu\n",
+ struct_size(port_msg, data, port_count), msg_len);
+ return -EINVAL;
+ }
+
for (i = 0; i < port_count; i++) {
u32 port_info = le32_to_cpu(port_msg->data[i]);
unsigned int ch_id;
@@ -191,7 +205,7 @@ static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
case CTL_ID_PORT_ENUM:
skb_pull(skb, sizeof(*ctrl_msg_h));
- ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data);
+ ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data, skb->len);
if (!ret)
ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0);
else
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
index f0918b36e899..7c3190bf0fcf 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -103,7 +103,7 @@ void t7xx_port_proxy_reset(struct port_proxy *port_prox);
void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
int t7xx_port_proxy_init(struct t7xx_modem *md);
void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state);
-int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg, size_t msg_len);
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
bool en_flag);
void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id);
diff --git a/drivers/platform/wmi/core.c b/drivers/platform/wmi/core.c
index 7aa40dab6145..5a2ffcbab6af 100644
--- a/drivers/platform/wmi/core.c
+++ b/drivers/platform/wmi/core.c
@@ -411,6 +411,9 @@ int wmidev_invoke_method(struct wmi_device *wdev, u8 instance, u32 method_id,
obj = aout.pointer;
if (!obj) {
+ if (min_size != 0)
+ return -ENOMSG;
+
out->length = 0;
out->data = ZERO_SIZE_PTR;
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index d1cc6e7d176c..24c151289dd3 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -243,7 +243,7 @@ static const struct dmi_system_id victus_s_thermal_profile_boards[] __initconst
},
{
.matches = { DMI_MATCH(DMI_BOARD_NAME, "8D41") },
- .driver_data = (void *)&victus_s_thermal_params,
+ .driver_data = (void *)&omen_v1_no_ec_thermal_params,
},
{
.matches = { DMI_MATCH(DMI_BOARD_NAME, "8D87") },
diff --git a/drivers/platform/x86/lenovo/wmi-other.c b/drivers/platform/x86/lenovo/wmi-other.c
index 6040f45aa2b0..6c2febe1a595 100644
--- a/drivers/platform/x86/lenovo/wmi-other.c
+++ b/drivers/platform/x86/lenovo/wmi-other.c
@@ -349,6 +349,8 @@ static int lwmi_om_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
*/
if (!relax_fan_constraint)
raw = val / LWMI_FAN_DIV * LWMI_FAN_DIV;
+ else
+ raw = val;
err = lwmi_om_fan_get_set(priv, channel, &raw, true);
if (err)
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 4d32fc676aaf..71e930e80178 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -3089,6 +3089,7 @@ static const struct bus_type genpd_bus_type = {
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
struct generic_pm_domain *pd;
+ bool is_virt_dev;
unsigned int i;
int ret = 0;
@@ -3098,6 +3099,13 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
+ /* Check if the device was created by genpd at attach. */
+ is_virt_dev = dev->bus == &genpd_bus_type;
+
+ /* Disable runtime PM if we enabled it at attach. */
+ if (is_virt_dev)
+ pm_runtime_disable(dev);
+
/* Drop the default performance state */
if (dev_gpd_data(dev)->default_pstate) {
dev_pm_genpd_set_performance_state(dev, 0);
@@ -3123,7 +3131,7 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
genpd_queue_power_off_work(pd);
/* Unregister the device if it was created by genpd. */
- if (dev->bus == &genpd_bus_type)
+ if (is_virt_dev)
device_unregister(dev);
}
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index d2b8d0332951..e1cfd4223473 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -1015,6 +1015,7 @@ static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *s
struct device_node *node, *smi_np;
int num_regmaps = 0, i, j;
struct regmap *regmap[3];
+ int ret = 0;
/*
* Legacy code retrieves a maximum of three bus protection handles:
@@ -1065,11 +1066,14 @@ static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *s
if (node) {
regmap[2] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
num_regmaps++;
- of_node_put(node);
- if (IS_ERR(regmap[2]))
- return dev_err_probe(dev, PTR_ERR(regmap[2]),
+ if (IS_ERR(regmap[2])) {
+ ret = dev_err_probe(dev, PTR_ERR(regmap[2]),
"%pOF: failed to get infracfg regmap\n",
node);
+ of_node_put(node);
+ return ret;
+ }
+ of_node_put(node);
} else {
regmap[2] = NULL;
}
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 6e4cb2871fca..0dcb50bf5c35 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1512,7 +1512,7 @@ static const struct rpmh_vreg_init_data pmh0101_vreg_data[] = {
RPMH_VREG("ldo13", LDO, 13, &pmic5_pldo530_mvp150, "vdd-l2-l13-l14"),
RPMH_VREG("ldo14", LDO, 14, &pmic5_pldo530_mvp150, "vdd-l2-l13-l14"),
RPMH_VREG("ldo15", LDO, 15, &pmic5_nldo530, "vdd-l15"),
- RPMH_VREG("ldo16", LDO, 15, &pmic5_pldo530_mvp600, "vdd-l5-l16"),
+ RPMH_VREG("ldo16", LDO, 16, &pmic5_pldo530_mvp600, "vdd-l5-l16"),
RPMH_VREG("ldo17", LDO, 17, &pmic5_pldo515_mv, "vdd-l17"),
RPMH_VREG("ldo18", LDO, 18, &pmic5_nldo530, "vdd-l18"),
RPMH_VREG("bob1", BOB, 1, &pmic5_bob, "vdd-bob1"),
diff --git a/drivers/spi/spi-ch341.c b/drivers/spi/spi-ch341.c
index 3eaa8f176f63..6448a44a8b67 100644
--- a/drivers/spi/spi-ch341.c
+++ b/drivers/spi/spi-ch341.c
@@ -250,5 +250,5 @@ static struct usb_driver ch341a_usb_driver = {
module_usb_driver(ch341a_usb_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
-MODULE_DESCRIPTION("QiHeng Electronics ch341 USB2SPI");
+MODULE_DESCRIPTION("Nanjing Qinheng Microelectronics CH341 USB2SPI driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e5c907c45b87..480d1e8b281f 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1382,9 +1382,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->target_burst = t->len;
}
- spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);
-
- return 0;
+ return spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);
}
static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
@@ -1709,6 +1707,7 @@ static int spi_imx_dma_data_prepare(struct spi_imx_data *spi_imx,
kfree(spi_imx->dma_data[0].dma_tx_buf);
kfree(spi_imx->dma_data[0].dma_rx_buf);
kfree(spi_imx->dma_data);
+ return ret;
}
}
@@ -1836,7 +1835,7 @@ static void spi_imx_dma_max_wml_find(struct spi_imx_data *spi_imx,
unsigned int i;
for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
- if (!dma_data->dma_len % (i * bytes_per_word))
+ if (!(dma_data->dma_len % (i * bytes_per_word)))
break;
}
/* Use 1 as wml in case no available burst length got */
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index eab059fb0bc2..4dee0fea1df8 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -74,6 +74,13 @@
#define STATUS_FLAGSX4 BIT(8)
#define STATUS_MASK GENMASK(8, 0)
+/*
+ * QSPI Direct Access register defines
+ */
+#define DIRECT_ACCESS_EN_SSEL BIT(0)
+#define DIRECT_ACCESS_OP_SSEL BIT(1)
+#define DIRECT_ACCESS_OP_SSEL_SHIFT 1
+
#define BYTESUPPER_MASK GENMASK(31, 16)
#define BYTESLOWER_MASK GENMASK(15, 0)
@@ -158,7 +165,39 @@ static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_m
return 0;
}
-static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
+static void mchp_coreqspi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(spi->controller);
+ u32 val;
+
+ val = readl(qspi->regs + REG_DIRECT_ACCESS);
+
+ val &= ~DIRECT_ACCESS_OP_SSEL;
+ val |= !enable << DIRECT_ACCESS_OP_SSEL_SHIFT;
+
+ writel(val, qspi->regs + REG_DIRECT_ACCESS);
+}
+
+static int mchp_coreqspi_setup(struct spi_device *spi)
+{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(spi->controller);
+ u32 val;
+
+ /*
+ * Active low devices need to be specifically set to their inactive
+ * states during probe.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ return 0;
+
+ val = readl(qspi->regs + REG_DIRECT_ACCESS);
+ val |= DIRECT_ACCESS_OP_SSEL;
+ writel(val, qspi->regs + REG_DIRECT_ACCESS);
+
+ return 0;
+}
+
+static void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
{
u32 control, data;
@@ -194,7 +233,7 @@ static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
}
}
-static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi)
+static void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi)
{
u32 control, data;
@@ -222,7 +261,7 @@ static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi)
}
}
-static inline void mchp_coreqspi_write_read_op(struct mchp_coreqspi *qspi)
+static void mchp_coreqspi_write_read_op(struct mchp_coreqspi *qspi)
{
u32 control, data;
@@ -380,20 +419,7 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
return 0;
}
-static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
-{
- struct spi_controller *ctlr = spi_dev->controller;
- struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
- u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
-
- control |= (CONTROL_MASTER | CONTROL_ENABLE);
- control &= ~CONTROL_CLKIDLE;
- writel_relaxed(control, qspi->regs + REG_CONTROL);
-
- return 0;
-}
-
-static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
+static void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
u32 idle_cycles = 0;
int total_bytes, cmd_bytes, frames, ctrl;
@@ -483,6 +509,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
reinit_completion(&qspi->data_completion);
mchp_coreqspi_config_op(qspi, op);
+ mchp_coreqspi_set_cs(mem->spi, true);
if (op->cmd.opcode) {
qspi->txbuf = &opcode;
qspi->rxbuf = NULL;
@@ -523,6 +550,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
err = -ETIMEDOUT;
error:
+ mchp_coreqspi_set_cs(mem->spi, false);
mutex_unlock(&qspi->op_lock);
mchp_coreqspi_disable_ints(qspi);
@@ -662,18 +690,28 @@ static int mchp_coreqspi_transfer_one(struct spi_controller *ctlr, struct spi_de
struct spi_transfer *t)
{
struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
+ bool dual_quad = false;
qspi->tx_len = t->len;
+ if (t->tx_nbits == SPI_NBITS_QUAD || t->rx_nbits == SPI_NBITS_QUAD ||
+ t->tx_nbits == SPI_NBITS_DUAL ||
+ t->rx_nbits == SPI_NBITS_DUAL)
+ dual_quad = true;
+
if (t->tx_buf)
qspi->txbuf = (u8 *)t->tx_buf;
if (!t->rx_buf) {
mchp_coreqspi_write_op(qspi);
- } else {
+ } else if (!dual_quad) {
qspi->rxbuf = (u8 *)t->rx_buf;
qspi->rx_len = t->len;
mchp_coreqspi_write_read_op(qspi);
+ } else {
+ qspi->rxbuf = (u8 *)t->rx_buf;
+ qspi->rx_len = t->len;
+ mchp_coreqspi_read_op(qspi);
}
return 0;
@@ -686,6 +724,7 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
+ u32 num_cs, val;
ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*qspi));
if (!ctlr)
@@ -718,10 +757,18 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
return ret;
}
+ /*
+ * The IP core only has a single CS, any more have to be provided via
+ * gpios
+ */
+ if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs))
+ num_cs = 1;
+
+ ctlr->num_chipselect = num_cs;
+
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &mchp_coreqspi_mem_ops;
ctlr->mem_caps = &mchp_coreqspi_mem_caps;
- ctlr->setup = mchp_coreqspi_setup_op;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->dev.of_node = np;
@@ -729,9 +776,21 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
ctlr->prepare_message = mchp_coreqspi_prepare_message;
ctlr->unprepare_message = mchp_coreqspi_unprepare_message;
ctlr->transfer_one = mchp_coreqspi_transfer_one;
- ctlr->num_chipselect = 2;
+ ctlr->setup = mchp_coreqspi_setup;
+ ctlr->set_cs = mchp_coreqspi_set_cs;
ctlr->use_gpio_descriptors = true;
+ val = readl_relaxed(qspi->regs + REG_CONTROL);
+ val |= (CONTROL_MASTER | CONTROL_ENABLE);
+ writel_relaxed(val, qspi->regs + REG_CONTROL);
+
+ /*
+ * Put cs into software controlled mode
+ */
+ val = readl_relaxed(qspi->regs + REG_DIRECT_ACCESS);
+ val |= DIRECT_ACCESS_EN_SSEL;
+ writel(val, qspi->regs + REG_DIRECT_ACCESS);
+
ret = spi_register_controller(ctlr);
if (ret)
return dev_err_probe(&pdev->dev, ret,