author     Jens Axboe <axboe@kernel.dk>    2018-11-18 15:46:03 -0700
committer  Jens Axboe <axboe@kernel.dk>    2018-11-18 15:46:03 -0700
commit     a78b03bc7300e4f17b1e510884bea1095d92b17b (patch)
tree       855f219806462da09c8021b27848c58713bb1807 /drivers
parent     fce15a609f8f30cfacfaf684729add9582be780b (diff)
parent     9ff01193a20d391e8dbce4403dd5ef87c7eaaca6 (diff)
Merge tag 'v4.20-rc3' into for-4.21/block
Merge in -rc3 to resolve a few conflicts, but also to get a few important fixes that have gone into mainline since the block 4.21 branch was forked off (most notably the SCSI queue issue, which is both a conflict AND a needed fix).

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 19
-rw-r--r--  drivers/acpi/nfit/mce.c | 8
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/ata/sata_rcar.c | 6
-rw-r--r--  drivers/block/floppy.c | 3
-rw-r--r--  drivers/block/xen-blkfront.c | 1
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 1
-rw-r--r--  drivers/clk/meson/axg.c | 13
-rw-r--r--  drivers/clk/meson/gxbb.c | 12
-rw-r--r--  drivers/clk/qcom/gcc-qcs404.c | 2
-rw-r--r--  drivers/clocksource/i8253.c | 14
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 7
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c | 40
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c | 31
-rw-r--r--  drivers/firmware/efi/arm-init.c | 4
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 35
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 3
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 4
-rw-r--r--  drivers/firmware/efi/memmap.c | 3
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 3
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 115
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 64
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 93
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 6
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 15
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 22
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 37
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 27
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 5
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 3
-rw-r--r--  drivers/i2c/busses/Kconfig | 11
-rw-r--r--  drivers/i2c/busses/Makefile | 1
-rw-r--r--  drivers/i2c/busses/i2c-nvidia-gpu.c | 368
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 15
-rw-r--r--  drivers/leds/trigger/ledtrig-pattern.c | 27
-rw-r--r--  drivers/mtd/devices/Kconfig | 2
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 10
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 1
-rw-r--r--  drivers/mtd/spi-nor/cadence-quadspi.c | 2
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 35
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 61
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 21
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 86
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 17
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 69
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 8
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 2
-rw-r--r--  drivers/net/fddi/defza.c | 7
-rw-r--r--  drivers/net/fddi/defza.h | 3
-rw-r--r--  drivers/net/phy/broadcom.c | 18
-rw-r--r--  drivers/net/usb/smsc95xx.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 4
-rw-r--r--  drivers/nvme/host/multipath.c | 1
-rw-r--r--  drivers/nvme/target/core.c | 2
-rw-r--r--  drivers/nvme/target/rdma.c | 19
-rw-r--r--  drivers/of/device.c | 4
-rw-r--r--  drivers/of/of_numa.c | 9
-rw-r--r--  drivers/pci/pci-acpi.c | 5
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxbb.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxl.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson8.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson8b.c | 2
-rw-r--r--  drivers/rtc/hctosys.c | 4
-rw-r--r--  drivers/rtc/rtc-cmos.c | 16
-rw-r--r--  drivers/rtc/rtc-pcf2127.c | 3
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/NCR5380.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r--  drivers/scsi/myrb.c | 3
-rw-r--r--  drivers/scsi/myrs.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 1
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/tty/serial/sh-sci.c | 8
-rw-r--r--  drivers/tty/tty_baudrate.c | 4
-rw-r--r--  drivers/tty/vt/vt.c | 2
-rw-r--r--  drivers/usb/typec/ucsi/Kconfig | 10
-rw-r--r--  drivers/usb/typec/ucsi/Makefile | 2
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 307
-rw-r--r--  drivers/xen/grant-table.c | 2
-rw-r--r--  drivers/xen/privcmd-buf.c | 22
181 files changed, 1865 insertions, 849 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 8f3a444c6ea9..7cea769c37df 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
config XPOWER_PMIC_OPREGION
bool "ACPI operation region support for XPower AXP288 PMIC"
- depends on MFD_AXP20X_I2C && IOSF_MBI
+ depends on MFD_AXP20X_I2C && IOSF_MBI=y
help
This config adds ACPI operation region support for XPower AXP288 PMIC.
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f8c638f3c946..14d9f5bea015 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2928,9 +2928,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
return rc;
if (ars_status_process_records(acpi_desc))
- return -ENOMEM;
+ dev_err(acpi_desc->dev, "Failed to process ARS records\n");
- return 0;
+ return rc;
}
static int ars_register(struct acpi_nfit_desc *acpi_desc,
@@ -3341,8 +3341,6 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
- struct nfit_spa *nfit_spa;
- int rc = 0;
if (nvdimm)
return 0;
@@ -3355,17 +3353,10 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
* just needs guarantees that any ARS it initiates are not
* interrupted by any intervening start requests from userspace.
*/
- mutex_lock(&acpi_desc->init_mutex);
- list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
- if (acpi_desc->scrub_spa
- || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
- || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
- rc = -EBUSY;
- break;
- }
- mutex_unlock(&acpi_desc->init_mutex);
+ if (work_busy(&acpi_desc->dwork.work))
+ return -EBUSY;
- return rc;
+ return 0;
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e9626bf6ca29..d6c1b10f6c25 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
struct acpi_nfit_desc *acpi_desc;
struct nfit_spa *nfit_spa;
- /* We only care about memory errors */
- if (!mce_is_memory_error(mce))
+ /* We only care about uncorrectable memory errors */
+ if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
+ return NOTIFY_DONE;
+
+ /* Verify the address reported in the MCE is valid. */
+ if (!mce_usable_address(mce))
return NOTIFY_DONE;
/*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6e594644cb1d..a7f5202a4815 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
- { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 10ecb232245d..4b1ff5bc256a 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas R-Car SATA driver
*
* Author: Vladimir Barinov <source@cogentembedded.com>
* Copyright (C) 2013-2015 Cogent Embedded, Inc.
* Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cad339ec9c19..6f2856c6d0f2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4142,10 +4142,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
bio.bi_end_io = floppy_rb0_cb;
bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+ init_completion(&cbdata.complete);
+
submit_bio(&bio);
process_fd_request();
- init_completion(&cbdata.complete);
wait_for_completion(&cbdata.complete);
__free_page(page);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 56452cabce5b..0ed4b200fa58 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+ info->nr_rings = 0;
return -ENOMEM;
}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index ef0ca9414f37..ff83e899df71 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
struct clk *clk = platform_get_drvdata(pdev);
+ of_clk_del_provider(pdev->dev.of_node);
clk_unregister_fixed_factor(clk);
return 0;
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index c981159b02c0..792735d7e46e 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -325,6 +325,7 @@ static struct clk_regmap axg_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -349,6 +350,18 @@ static struct clk_regmap axg_fclk_div3 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+ * This clock, as fdiv2, is used by the SCPI FW and is required
+ * by the platform to operate correctly.
+ * Until the following condition are met, we need this clock to
+ * be marked as critical:
+ * a) The SCPI generic driver claims and enable all the clocks
+ * it needs
+ * b) CCF has a clock hand-off mechanism to make the sure the
+ * clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 9309cfaaa464..4ada9668fd49 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -506,6 +506,18 @@ static struct clk_regmap gxbb_fclk_div3 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+ * This clock, as fdiv2, is used by the SCPI FW and is required
+ * by the platform to operate correctly.
+ * Until the following condition are met, we need this clock to
+ * be marked as critical:
+ * a) The SCPI generic driver claims and enable all the clocks
+ * it needs
+ * b) CCF has a clock hand-off mechanism to make the sure the
+ * clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index e4ca6a45f313..ef1b267cb058 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -265,7 +265,7 @@ static struct clk_fixed_factor cxo = {
.div = 1,
.hw.init = &(struct clk_init_data){
.name = "cxo",
- .parent_names = (const char *[]){ "xo_board" },
+ .parent_names = (const char *[]){ "xo-board" },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 9c38895542f4..d4350bb10b83 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -20,6 +20,13 @@
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
raw_spin_lock(&i8253_lock);
outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
+
+ if (i8253_clear_counter_on_shutdown) {
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
raw_spin_unlock(&i8253_lock);
return 0;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 8cfee0ab804b..d8c3595e9023 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -160,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* Ensure the arm clock divider is what we expect */
ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
+ int ret1;
+
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ if (ret1)
+ dev_warn(cpu_dev,
+ "failed to restore vddarm voltage: %d\n", ret1);
return ret;
}
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 073557f433eb..3a407a3ef22b 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -82,7 +82,6 @@ static int __init arm_idle_init_cpu(int cpu)
{
int ret;
struct cpuidle_driver *drv;
- struct cpuidle_device *dev;
drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -103,13 +102,6 @@ static int __init arm_idle_init_cpu(int cpu)
goto out_kfree_drv;
}
- ret = cpuidle_register_driver(drv);
- if (ret) {
- if (ret != -EBUSY)
- pr_err("Failed to register cpuidle driver\n");
- goto out_kfree_drv;
- }
-
/*
* Call arch CPU operations in order to initialize
* idle states suspend back-end specific data
@@ -117,37 +109,21 @@ static int __init arm_idle_init_cpu(int cpu)
ret = arm_cpuidle_init(cpu);
/*
- * Skip the cpuidle device initialization if the reported
+ * Allow the initialization to continue for other CPUs, if the reported
* failure is a HW misconfiguration/breakage (-ENXIO).
*/
- if (ret == -ENXIO)
- return 0;
-
if (ret) {
pr_err("CPU %d failed to init idle CPU ops\n", cpu);
- goto out_unregister_drv;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto out_unregister_drv;
+ ret = ret == -ENXIO ? 0 : ret;
+ goto out_kfree_drv;
}
- dev->cpu = cpu;
- ret = cpuidle_register_device(dev);
- if (ret) {
- pr_err("Failed to register cpuidle device for CPU %d\n",
- cpu);
- goto out_kfree_dev;
- }
+ ret = cpuidle_register(drv, NULL);
+ if (ret)
+ goto out_kfree_drv;
return 0;
-out_kfree_dev:
- kfree(dev);
-out_unregister_drv:
- cpuidle_unregister_driver(drv);
out_kfree_drv:
kfree(drv);
return ret;
@@ -178,9 +154,7 @@ out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
drv = cpuidle_get_cpu_driver(dev);
- cpuidle_unregister_device(dev);
- cpuidle_unregister_driver(drv);
- kfree(dev);
+ cpuidle_unregister(drv);
kfree(drv);
}
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index f7d6d690116e..cdc4f9a171d9 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
int *splits_in_nents;
int *splits_out_nents = NULL;
struct sec_request_el *el, *temp;
+ bool split = skreq->src != skreq->dst;
mutex_init(&sec_req->lock);
sec_req->req_base = &skreq->base;
@@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
if (ret)
goto err_free_split_sizes;
- if (skreq->src != skreq->dst) {
+ if (split) {
sec_req->len_out = sg_nents(skreq->dst);
ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
&splits_out, &splits_out_nents,
@@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
split_sizes[i],
skreq->src != skreq->dst,
splits_in[i], splits_in_nents[i],
- splits_out[i],
- splits_out_nents[i], info);
+ split ? splits_out[i] : NULL,
+ split ? splits_out_nents[i] : 0,
+ info);
if (IS_ERR(el)) {
ret = PTR_ERR(el);
goto err_free_elements;
@@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
* more refined but this is unlikely to happen so no need.
*/
- /* Cleanup - all elements in pointer arrays have been coppied */
- kfree(splits_in_nents);
- kfree(splits_in);
- kfree(splits_out_nents);
- kfree(splits_out);
- kfree(split_sizes);
-
/* Grab a big lock for a long time to avoid concurrency issues */
mutex_lock(&queue->queuelock);
@@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
(!queue->havesoftqueue ||
kfifo_avail(&queue->softqueue) > steps)) ||
!list_empty(&ctx->backlog)) {
+ ret = -EBUSY;
if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
list_add_tail(&sec_req->backlog_head, &ctx->backlog);
mutex_unlock(&queue->queuelock);
- return -EBUSY;
+ goto out;
}
- ret = -EBUSY;
mutex_unlock(&queue->queuelock);
goto err_free_elements;
}
@@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
if (ret)
goto err_free_elements;
- return -EINPROGRESS;
+ ret = -EINPROGRESS;
+out:
+ /* Cleanup - all elements in pointer arrays have been copied */
+ kfree(splits_in_nents);
+ kfree(splits_in);
+ kfree(splits_out_nents);
+ kfree(splits_out);
+ kfree(split_sizes);
+ return ret;
err_free_elements:
list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
@@ -854,7 +857,7 @@ err_free_elements:
crypto_skcipher_ivsize(atfm),
DMA_BIDIRECTIONAL);
err_unmap_out_sg:
- if (skreq->src != skreq->dst)
+ if (split)
sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
splits_out_nents, sec_req->len_out,
info->dev);
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 388a929baf95..1a6a77df8a5e 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -265,6 +265,10 @@ void __init efi_init(void)
(params.mmap & ~PAGE_MASK)));
init_screen_info();
+
+ /* ARM does not permit early mappings to persist across paging_init() */
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_memmap_unmap();
}
static int __init register_gop_device(void)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 922cfb813109..a00934d263c5 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void)
{
u64 mapsize;
- if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
+ if (!efi_enabled(EFI_BOOT)) {
pr_info("EFI services will not be available.\n");
return 0;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 249eb70691b0..fad7c62cfc0e 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,7 +592,11 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
early_memunmap(tbl, sizeof(*tbl));
}
+ return 0;
+}
+int __init efi_apply_persistent_mem_reservations(void)
+{
if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
unsigned long prsv = efi.mem_reserve;
@@ -963,36 +967,43 @@ bool efi_is_table_address(unsigned long phys_addr)
}
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
- struct linux_efi_memreserve *rsv, *parent;
+ struct linux_efi_memreserve *rsv;
- if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ if (!efi_memreserve_root)
return -ENODEV;
- rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
+ rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
if (!rsv)
return -ENOMEM;
- parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
- if (!parent) {
- kfree(rsv);
- return -ENOMEM;
- }
-
rsv->base = addr;
rsv->size = size;
spin_lock(&efi_mem_reserve_persistent_lock);
- rsv->next = parent->next;
- parent->next = __pa(rsv);
+ rsv->next = efi_memreserve_root->next;
+ efi_memreserve_root->next = __pa(rsv);
spin_unlock(&efi_mem_reserve_persistent_lock);
- memunmap(parent);
+ return 0;
+}
+static int __init efi_memreserve_root_init(void)
+{
+ if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ efi_memreserve_root = memremap(efi.mem_reserve,
+ sizeof(*efi_memreserve_root),
+ MEMREMAP_WB);
+ if (!efi_memreserve_root)
+ return -ENOMEM;
return 0;
}
+early_initcall(efi_memreserve_root_init);
#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 30ac0c975f8a..3d36142cf812 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -75,6 +75,9 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
efi_status_t status;
+ if (IS_ENABLED(CONFIG_ARM))
+ return;
+
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
(void **)&rsv);
if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 8830fa601e45..0c0d2312f4a8 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
return efi_status;
}
}
+
+ /* shrink the FDT back to its minimum size */
+ fdt_pack(fdt);
+
return EFI_SUCCESS;
fdt_set_fail:
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index fa2904fb841f..38b686c67b17 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
void __init efi_memmap_unmap(void)
{
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
if (!efi.memmap.late) {
unsigned long size;
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index a19d845bdb06..8903b9ccfc2b 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -67,7 +67,7 @@ struct efi_runtime_work efi_rts_work;
} \
\
init_completion(&efi_rts_work.efi_rts_comp); \
- INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
+ INIT_WORK(&efi_rts_work.work, efi_call_rts); \
efi_rts_work.arg1 = _arg1; \
efi_rts_work.arg2 = _arg2; \
efi_rts_work.arg3 = _arg3; \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d0102cfc8efb..104b2e0d893b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -151,6 +151,7 @@ extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
#ifdef CONFIG_DRM_AMDGPU_SI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 943dbf3c5da1..8de55f7f1a3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default*/
+uint amdgpu_dc_feature_mask = 0;
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
@@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
#endif
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 352b30409060..dad0e2342df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
continue;
}
- /* First check if the entry is already handled */
- if (cursor.pfn < frag_start) {
- cursor.entry->huge = true;
- amdgpu_vm_pt_next(adev, &cursor);
- continue;
- }
-
/* If it isn't already handled it can't be a huge page */
if (cursor.entry->huge) {
/* Add the entry to the relocated list to update it. */
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
}
} while (frag_start < entry_end);
- if (frag >= shift)
+ if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+ /* Mark all child entries as huge */
+ while (cursor.pfn < frag_start) {
+ cursor.entry->huge = true;
+ amdgpu_vm_pt_next(adev, &cursor);
+ }
+
+ } else if (frag >= shift) {
+ /* or just move on to the next on the same level. */
amdgpu_vm_pt_next(adev, &cursor);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ceb7847b504f..bfa317ad20a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
@@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.vram_end >> 18) + 0x1,
+ max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index fd23ba1226a5..a0db67adc34c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -90,7 +90,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
@@ -100,11 +100,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.vram_end >> 18) + 0x1,
+ max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a99f71797aa3..a0fda6f9252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
else
wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 2d4473557b0d..d13fc4fcb517 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b0df6dc9a775..c1262f62cd9f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -429,6 +429,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1524,13 +1527,6 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
- /*
- * PWM interperts 0 as 100% rather than 0% because of HW
- * limitation for level 0.So limiting minimum brightness level
- * to 1.
- */
- if (bd->props.brightness < 1)
- return 1;
if (dc_link_set_backlight_level(dm->backlight_link,
bd->props.brightness, 0, 0))
return 0;
@@ -2707,18 +2703,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_connector = &aconnector->base;
if (!aconnector->dc_sink) {
- /*
- * Create dc_sink when necessary to MST
- * Don't apply fake_sink to MST
- */
- if (aconnector->mst_port) {
- dm_dp_mst_dc_sink_create(drm_connector);
- return stream;
+ if (!aconnector->mst_port) {
+ sink = create_fake_sink(aconnector);
+ if (!sink)
+ return stream;
}
-
- sink = create_fake_sink(aconnector);
- if (!sink)
- return stream;
} else {
sink = aconnector->dc_sink;
}
@@ -3308,7 +3297,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
+ .destroy = drm_primary_helper_destroy,
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 978b34a5011c..924a38a1fc44 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -160,8 +160,6 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
-
- bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 03601d717fed..d02c32a1039c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct dc_sink *dc_sink;
- struct dc_sink_init_data init_params = {
- .link = aconnector->dc_link,
- .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
-
- /* FIXME none of this is safe. we shouldn't touch aconnector here in
- * atomic_check
- */
-
- /*
- * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
- */
- if (!aconnector->port || !aconnector->port->aux.ddc.algo)
- return;
-
- ASSERT(aconnector->edid);
-
- dc_sink = dc_link_add_remote_sink(
- aconnector->dc_link,
- (uint8_t *)aconnector->edid,
- (aconnector->edid->extensions + 1) * EDID_LENGTH,
- &init_params);
-
- dc_sink->priv = aconnector;
- aconnector->dc_sink = dc_sink;
-
- if (aconnector->dc_sink)
- amdgpu_dm_update_freesync_caps(
- connector, aconnector->edid);
-}
-
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder;
struct drm_encoder *encoder;
- const struct drm_connector_helper_funcs *connector_funcs =
- connector->base.helper_private;
- struct drm_encoder *enc_master =
- connector_funcs->best_encoder(&connector->base);
- DRM_DEBUG_KMS("enc master is %p\n", enc_master);
amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
if (!amdgpu_encoder)
return NULL;
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->mst_port == master
- && !aconnector->port) {
- DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- aconnector->port = port;
- drm_connector_set_path_property(connector, pathprop);
-
- drm_connector_list_iter_end(&conn_iter);
- aconnector->mst_connected = true;
- return &aconnector->base;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
@@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
- aconnector->mst_connected = true;
-
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink = NULL;
}
- aconnector->mst_connected = false;
+ drm_connector_unregister(connector);
+ if (adev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
+ drm_connector_put(connector);
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
-{
- mutex_lock(&connector->dev->mode_config.mutex);
- drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
drm_connector_register(connector);
-
- if (aconnector->mst_connected)
- dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 8cf51da26657..2da851b40042 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fb04a4ad141f..5da2186b3615 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1722,7 +1722,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
@@ -1734,7 +1734,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 199527171100..b57fa61b3034 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -169,6 +169,7 @@ struct link_training_settings;
struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
+ bool fbc_support;
};
enum visual_confirm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b75ede5f84f7..b459867a05b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1736,7 +1736,12 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
if (events->force_trigger)
value |= 0x1;
- value |= 0x84;
+ if (num_pipes) {
+ struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
+
+ if (dc->fbc_compressor)
+ value |= 0x84;
+ }
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index e3624ca24574..7c9fd9052ee2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1362,7 +1362,8 @@ static bool construct(
pool->base.sw_i2cs[i] = NULL;
}
- dc->fbc_compressor = dce110_compressor_create(ctx);
+ if (dc->config.fbc_support)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
if (!underlay_create(ctx, &pool->base))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 2083c308007c..470d7b89071a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK {
PP_AVFS_MASK = 0x40000,
};
+enum DC_FEATURE_MASK {
+ DC_FBC_MASK = 0x1,
+};
+
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
*/
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index d2e7c0fa96c2..8eb0bb241210 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 {
struct atom_common_table_header table_header;
uint8_t smuip_min_ver;
uint8_t smuip_max_ver;
- uint8_t smu_rsd1;
+ uint8_t waflclk_ss_mode;
uint8_t gpuclk_ss_mode;
uint16_t sclk_ss_percentage;
uint16_t sclk_ss_rate_10hz;
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 {
uint32_t syspll3_1_vco_freq_10khz;
uint32_t bootup_fclk_10khz;
uint32_t bootup_waflclk_10khz;
- uint32_t reserved[3];
+ uint32_t smu_info_caps;
+ uint16_t waflclk_ss_percentage; // in unit of 0.001%
+ uint16_t smuinitoffset;
+ uint32_t reserved;
};
/*
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 99a33c33a32c..101c09b212ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[1][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
- 1000);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
table->WatermarkRow[0][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 57143d51e3ee..99861f32b1f9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -120,6 +120,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
+ data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
@@ -829,6 +830,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+ 1);
+
+ return 0;
+}
+
+static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFclkGfxClkRatio,
+ data->registry_data.fclk_gfxclk_ratio);
+}
+
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
@@ -1532,6 +1555,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to enable all smu features!",
return result);
+ result = vega20_notify_smc_display_change(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to notify smc display change!",
+ return result);
+
+ result = vega20_send_clock_ratio(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to send clock ratio!",
+ return result);
+
/* Initialize UVD/VCE powergating state */
vega20_init_powergate_state(hwmgr);
@@ -1972,19 +2005,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
return ret;
}
-static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
- bool has_disp)
-{
- struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DPM_UCLK].enabled)
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
-
- return 0;
-}
-
int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
@@ -2044,13 +2064,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
struct pp_display_clock_request clock_req;
int ret = 0;
- if ((hwmgr->display_config->num_display > 1) &&
- !hwmgr->display_config->multi_monitor_in_sync &&
- !hwmgr->display_config->nb_pstate_switch_disable)
- vega20_notify_smc_display_change(hwmgr, false);
- else
- vega20_notify_smc_display_change(hwmgr, true);
-
min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 56fe6a0d42e8..25faaa5c5b10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -328,6 +328,7 @@ struct vega20_registry_data {
uint8_t disable_auto_wattman;
uint32_t auto_wattman_debug;
uint32_t auto_wattman_sample_period;
+ uint32_t fclk_gfxclk_ratio;
uint8_t auto_wattman_threshold;
uint8_t log_avfs_param;
uint8_t enable_enginess;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index 45d64a81e945..4f63a736ea0e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -105,7 +105,8 @@
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B
#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C
#define PPSMC_MSG_WaflTest 0x4D
-// Unused ID 0x4E to 0x50
+#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E
+// Unused ID 0x4F to 0x50
#define PPSMC_MSG_AllowGfxOff 0x51
#define PPSMC_MSG_DisallowGfxOff 0x52
#define PPSMC_MSG_GetPptLimit 0x53
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5ff1d79b86c4..0e0df398222d 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
+ if (!mstb)
+ goto out;
+
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (rad[i / 2] >> shift) & 0xf;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 90a1c846fc25..8aaa5e86a979 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
* drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
+ * @dev: DRM device
* @bpp: bits per pixels
* @depth: bit depth per pixel
- * @native: use host native byte order
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
* Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index e7c3ed6c9a2e..9b476368aa31 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
* If the GPU managed to complete this jobs fence, the timout is
* spurious. Bail out.
*/
- if (fence_completed(gpu, submit->out_fence->seqno))
+ if (dma_fence_is_signaled(submit->out_fence))
return;
/*
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 94529aa82339..aef487dd8731 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
return frm;
}
-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
-{
- struct decon_context *ctx = crtc->ctx;
-
- return decon_get_frame_count(ctx, false);
-}
-
static void decon_setup_trigger(struct decon_context *ctx)
{
if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
@@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.disable = decon_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
- .get_vblank_counter = decon_get_vblank_counter,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
@@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
int ret;
ctx->drm_dev = drm_dev;
- drm_dev->max_vblank_count = 0xffffffff;
for (win = ctx->first_win; win < WINDOWS_NR; win++) {
ctx->configs[win].pixel_formats = decon_formats;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index eea90251808f..2696289ecc78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->get_vblank_counter)
- return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
-
- return 0;
-}
-
static const struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
- .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index ec9604f1272b..5e61e707f955 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops {
void (*disable)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
- u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode);
bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 07af7758066d..d81e62ae286a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -14,6 +14,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_atomic_helper.h>
@@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_connector *connector = &dsi->connector;
+ struct drm_device *drm = encoder->dev;
int ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(encoder->dev, connector,
- &exynos_dsi_connector_funcs,
+ ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
@@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
+ if (!drm->registered)
+ return 0;
+ connector->funcs->reset(connector);
+ drm_fb_helper_add_one_connector(drm->fb_helper, connector);
+ drm_connector_register(connector);
return 0;
}
@@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
}
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (dsi->panel) {
+ if (IS_ERR(dsi->panel)) {
+ dsi->panel = NULL;
+ } else {
drm_panel_attach(dsi->panel, &dsi->connector);
dsi->connector.status = connector_status_connected;
}
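Note on the hunk above: of_drm_find_panel() now signals failure with an ERR_PTR() value rather than NULL, which is why the caller switches from a NULL test to IS_ERR(). A minimal standalone model of that convention, with helper names mirroring the kernel ones but everything else invented for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct panel { const char *name; };

/* hypothetical lookup: fails with -ENODEV until a panel is bound */
static struct panel *find_panel(int bound)
{
	static struct panel p = { "dummy-panel" };

	return bound ? &p : ERR_PTR(-ENODEV);
}

int main(void)
{
	struct panel *panel = find_panel(0);

	if (IS_ERR(panel)) {
		printf("no panel: %ld\n", PTR_ERR(panel));
		panel = NULL;	/* same fallback as the DSI hunk above */
	}

	return 0;
}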
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 918dd2c82209..01d182289efa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -192,7 +192,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
struct drm_fb_helper *helper;
int ret;
- if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ if (!dev->mode_config.num_crtc)
return 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2402395a068d..58e166effa45 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
- mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
- mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_gvt_gtt_entry e, m;
dma_addr_t dma_addr;
int ret;
+ struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+ bool partial_update = false;
if (bytes != 4 && bytes != 8)
return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (!vgpu_gmadr_is_valid(vgpu, gma))
return 0;
- ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+ e.type = GTT_TYPE_GGTT_PTE;
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
- * write, we assume the two 4 bytes writes are consecutive.
- * Otherwise, we abort and report error
+ * write, save the first 4 bytes in a list and update virtual
+ * PTE. Only update shadow PTE when the second 4 bytes comes.
*/
if (bytes < info->gtt_entry_size) {
- if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
- /* the first partial part*/
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
- return 0;
- } else if ((g_gtt_index ==
- (ggtt_mm->ggtt_mm.last_partial_off >>
- info->gtt_entry_size_shift)) &&
- (off != ggtt_mm->ggtt_mm.last_partial_off)) {
- /* the second partial part */
-
- int last_off = ggtt_mm->ggtt_mm.last_partial_off &
- (info->gtt_entry_size - 1);
-
- memcpy((void *)&e.val64 + last_off,
- (void *)&ggtt_mm->ggtt_mm.last_partial_data +
- last_off, bytes);
-
- ggtt_mm->ggtt_mm.last_partial_off = -1UL;
- } else {
- int last_offset;
-
- gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
- ggtt_mm->ggtt_mm.last_partial_off, off,
- bytes, info->gtt_entry_size);
-
- /* set host ggtt entry to scratch page and clear
- * virtual ggtt entry as not present for last
- * partially write offset
- */
- last_offset = ggtt_mm->ggtt_mm.last_partial_off &
- (~(info->gtt_entry_size - 1));
-
- ggtt_get_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate_pte(vgpu, &m);
- ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- ops->clear_present(&m);
- ggtt_set_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate(gvt->dev_priv);
-
- ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
- ops->clear_present(&e);
- ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ bool found = false;
+
+ list_for_each_entry_safe(pos, n,
+ &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ if (g_gtt_index == pos->offset >>
+ info->gtt_entry_size_shift) {
+ if (off != pos->offset) {
+ /* the second partial part*/
+ int last_off = pos->offset &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&pos->data + last_off,
+ bytes);
+
+ list_del(&pos->list);
+ kfree(pos);
+ found = true;
+ break;
+ }
+
+ /* update of the first partial part */
+ pos->data = e.val64;
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+ }
+ }
- return 0;
+ if (!found) {
+ /* the first partial part */
+ partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+ if (!partial_pte)
+ return -ENOMEM;
+ partial_pte->offset = off;
+ partial_pte->data = e.val64;
+ list_add_tail(&partial_pte->list,
+ &ggtt_mm->ggtt_mm.partial_pte_list);
+ partial_update = true;
}
}
- if (ops->test_present(&e)) {
+ if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
m = e;
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
} else
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
} else {
- ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
ops->clear_present(&m);
}
out:
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &e);
+
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
ggtt_invalidate(gvt->dev_priv);
- ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
intel_vgpu_reset_ggtt(vgpu, false);
+ INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
return create_scratch_page_tree(vgpu);
}
@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
+ struct intel_gvt_partial_pte *pos;
+
+ list_for_each_entry(pos,
+ &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+ pos->offset, pos->data);
+ kfree(pos);
+ }
intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
vgpu->gtt.ggtt_mm = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 7a9b36176efb..d8cb04cc946d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -35,7 +35,6 @@
#define _GVT_GTT_H_
#define I915_GTT_PAGE_SHIFT 12
-#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
struct intel_vgpu_mm;
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {
#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
+struct intel_gvt_partial_pte {
+ unsigned long offset;
+ u64 data;
+ struct list_head list;
+};
+
struct intel_vgpu_mm {
enum intel_gvt_mm_type type;
struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
- unsigned long last_partial_off;
- u64 last_partial_data;
+ struct list_head partial_pte_list;
} ggtt_mm;
};
};
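Note on the gtt.c/gtt.h changes above: the single last_partial_off/last_partial_data pair is replaced by a per-vGPU list, so several 8-byte GGTT entries can each be assembled from two 4-byte writes that arrive interleaved. A self-contained sketch of that bookkeeping, under the assumption of a plain singly linked list and invented names (pending_pte, ggtt_write32):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pending_pte {
	unsigned long index;		/* GGTT entry index */
	unsigned int off;		/* byte offset of the half seen first */
	uint64_t data;			/* entry value built so far */
	struct pending_pte *next;
};

static struct pending_pte *pending;

/* Returns 1 when a complete 8-byte entry is ready in *out. */
static int ggtt_write32(unsigned int off, uint32_t val, uint64_t *out)
{
	unsigned long index = off / 8;
	struct pending_pte **pp, *p;
	uint64_t entry = 0;

	memcpy((char *)&entry + (off & 7), &val, sizeof(val));

	for (pp = &pending; (p = *pp) != NULL; pp = &p->next) {
		if (p->index != index)
			continue;
		if (p->off == off) {		/* same half written again */
			p->data = entry;
			return 0;
		}
		/* the other half arrived: merge and drop the record */
		memcpy((char *)&entry + (p->off & 7),
		       (char *)&p->data + (p->off & 7), sizeof(val));
		*pp = p->next;
		free(p);
		*out = entry;
		return 1;
	}

	p = calloc(1, sizeof(*p));		/* first half: park it */
	if (!p)
		return 0;
	p->index = index;
	p->off = off;
	p->data = entry;
	p->next = pending;
	pending = p;
	return 0;
}

int main(void)
{
	uint64_t e;

	ggtt_write32(0, 0x11111111, &e);	/* low half of entry 0 */
	if (ggtt_write32(4, 0x22222222, &e))	/* high half completes it */
		printf("entry 0 = %#llx\n", (unsigned long long)e);
	return 0;
}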
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f50f67909a..aa280bb07125 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
return 0;
}
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
return 0;
}
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
- MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
- MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
MMIO_D(RC6_CTX_BASE, D_BXT);
MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 10e63eea5492..36a5147cd01e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44e2c0f5ec50..ffdbbac4400e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1175,8 +1175,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- dram_info->valid_dimm = true;
-
/*
* If any of the channel is single rank channel, worst case output
* will be same as if single rank memory, so consider single rank
@@ -1193,8 +1191,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
- dram_info->is_16gb_dimm = true;
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
val_ch1,
@@ -1314,7 +1311,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- dram_info->valid_dimm = true;
dram_info->valid = true;
return 0;
}
@@ -1327,12 +1323,17 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
int ret;
dram_info->valid = false;
- dram_info->valid_dimm = false;
- dram_info->is_16gb_dimm = false;
dram_info->rank = I915_DRAM_RANK_INVALID;
dram_info->bandwidth_kbps = 0;
dram_info->num_channels = 0;
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+
if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8624b4bdc242..9102571e9692 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1948,7 +1948,6 @@ struct drm_i915_private {
struct dram_info {
bool valid;
- bool valid_dimm;
bool is_16gb_dimm;
u8 num_channels;
enum dram_rank {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 09187286d346..1aaccbe7e1de 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
* any non-page-aligned or non-canonical addresses.
*/
if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
- entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
+ entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
return -EINVAL;
/* pad_to_size was once a reserved field, so sanitize it */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56c7f8637311..47c302543799 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1757,7 +1757,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
if (i == 4)
continue;
- seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
pde, pte,
(pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
for (i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7e2af5f4f39b..28039290655c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,13 +42,15 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
-#define I915_GTT_PAGE_SIZE_4K BIT(12)
-#define I915_GTT_PAGE_SIZE_64K BIT(16)
-#define I915_GTT_PAGE_SIZE_2M BIT(21)
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
#define I915_FENCE_REG_NONE -1
@@ -659,20 +661,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK BIT(0)
-#define PIN_MAPPABLE BIT(1)
-#define PIN_ZONE_4G BIT(2)
-#define PIN_NONFAULT BIT(3)
-#define PIN_NOEVICT BIT(4)
-
-#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT(8)
-
-#define PIN_HIGH BIT(9)
-#define PIN_OFFSET_BIAS BIT(10)
-#define PIN_OFFSET_FIXED BIT(11)
+#define PIN_NONBLOCK BIT_ULL(0)
+#define PIN_MAPPABLE BIT_ULL(1)
+#define PIN_ZONE_4G BIT_ULL(2)
+#define PIN_NONFAULT BIT_ULL(3)
+#define PIN_NOEVICT BIT_ULL(4)
+
+#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE BIT_ULL(8)
+
+#define PIN_HIGH BIT_ULL(9)
+#define PIN_OFFSET_BIAS BIT_ULL(10)
+#define PIN_OFFSET_FIXED BIT_ULL(11)
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
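Note on the BIT_ULL() conversion above: masks derived from BIT() are unsigned long, so on a 32-bit build ~(I915_GTT_PAGE_SIZE - 1) would truncate a 64-bit GTT address, whereas -I915_GTT_PAGE_SIZE built from BIT_ULL() does not. A small standalone illustration of the difference (constants hard-coded here, not taken from the headers):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint64_t addr = 0x1234567000ULL;	/* GTT offset above 4 GiB */
	/* what ~(BIT(12) - 1) evaluates to when unsigned long is 32-bit */
	uint32_t mask32 = 0xfffff000u;
	/* the 64-bit equivalent, spelled in the hunk as -I915_GTT_PAGE_SIZE */
	uint64_t mask64 = -BIT_ULL(12);

	printf("64-bit mask keeps the address: %#llx\n",
	       (unsigned long long)(addr & mask64));
	printf("32-bit mask truncates it:      %#llx\n",
	       (unsigned long long)(addr & mask32));
	return 0;
}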
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7c491ea3d052..e31c27e45734 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2095,8 +2095,12 @@ enum i915_power_well_id {
/* ICL PHY DFLEX registers */
#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
-#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
@@ -4593,12 +4597,12 @@ enum {
#define DRM_DIP_ENABLE (1 << 28)
#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 26)
-#define VSC_SELECT_SHIFT 26
-#define VSC_DIP_HW_HEA_DATA (0 << 26)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
-#define VSC_DIP_SW_HEA_DATA (3 << 26)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
#define VDIP_ENABLE_PPS (1 << 24)
/* Panel power sequencing */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 769f3f586661..ee3ca2de983b 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -144,6 +144,9 @@ static const struct {
/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
static const struct {
int sample_rate;
int clock;
@@ -164,6 +167,20 @@ static const struct {
{ 176400, TMDS_297M, 18816, 247500 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
+ { 48000, TMDS_593M, 5824, 562500 },
+ { 48000, TMDS_594M, 6144, 594000 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
+ { 96000, TMDS_593M, 11648, 562500 },
+ { 96000, TMDS_594M, 12288, 594000 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 192000, TMDS_593M, 23296, 562500 },
+ { 192000, TMDS_594M, 24576, 594000 },
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 29075c763428..8d74276029e6 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
- * as a temporary workaround. Use a higher cdclk instead. (Note that
- * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
- * cdclk.)
- */
- return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 2 * max_cdclk_freq;
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Limiting to 99% as a temporary workaround. See
- * intel_min_cdclk() for details.
- */
- return 2 * max_cdclk_freq * 99 / 100;
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0ef0c6448d53..01fa98299bae 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask[ss] & BIT(ss)))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9741cc419e1b..a54843fdeb2f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4850,8 +4850,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
* chroma samples for both of the luma samples, and thus we don't
* actually get the expected MPEG2 chroma siting convention :(
* The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | | 1.5 (initial phase)
+ * | | |
+ * v v v
+ * | s | s | s | s |
+ * | d |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | | 0.0
+ * | | |
+ * v v v
+ * | s |
+ * | d | d | d | d |
*/
-u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
int phase = -0x8000;
u16 trip = 0;
@@ -4859,6 +4882,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
if (chroma_cosited)
phase += (sub - 1) * 0x8000 / sub;
+ phase += scale / (2 * sub);
+
+ /*
+ * Hardware initial phase limited to [-0.5:1.5].
+ * Since the max hardware scale factor is 3.0, we
+ * should never actually exceed 1.0 here.
+ */
+ WARN_ON(phase < -0x8000 || phase > 0x18000);
+
if (phase < 0)
phase = 0x10000 + phase;
else
@@ -5067,13 +5099,20 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
if (crtc->config->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
+ int pfit_w, pfit_h, hscale, vscale;
int id;
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
- uv_rgb_hphase = skl_scaler_calc_phase(1, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+ pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
+ pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
+
+ hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
+ vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
@@ -12768,17 +12807,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!new_crtc_state->active) {
- /*
- * Make sure we don't call initial_watermarks
- * for ILK-style watermark updates.
- *
- * No clue what this is supposed to achieve.
- */
- if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(new_crtc_state));
- }
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->active &&
+ !HAS_GMCH_DISPLAY(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(new_crtc_state));
}
}
@@ -14646,7 +14680,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
fb->height < SKL_MIN_YUV_420_SRC_H ||
(fb->width % 4) != 0 || (fb->height % 4) != 0)) {
DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
- return -EINVAL;
+ goto err;
}
for (i = 0; i < fb->format->num_planes; i++) {
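Note on skl_scaler_calc_phase() above: the new scale argument adds what appears to be half of one destination pixel step, measured in source samples, to the -0.5 starting phase, which reproduces the 1.5 and -0.375 initial phases shown in the comment's diagrams. Reproducing just that arithmetic standalone (the register encoding done by the real function is omitted):

#include <stdio.h>

/* phase and scale are .16 fixed point: 0x10000 == 1.0, -0x8000 == -0.5 */
static int scaler_initial_phase(int sub, int scale, int chroma_cosited)
{
	int phase = -0x8000;			/* -0.5 */

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	/* half a destination step, in source samples */
	phase += scale / (2 * sub);
	return phase;
}

int main(void)
{
	printf("1:1 scaling   -> %+.3f\n",
	       scaler_initial_phase(1, 0x10000, 0) / 65536.0);
	printf("4:1 downscale -> %+.3f\n",
	       scaler_initial_phase(1, 0x40000, 0) / 65536.0);
	printf("1:4 upscale   -> %+.3f\n",
	       scaler_initial_phase(1, 0x04000, 0) / 65536.0);
	return 0;
}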
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 1b00f8ea145b..a911691dbd0f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -452,6 +452,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (!intel_connector)
return NULL;
+ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->mst_port = intel_dp;
+ intel_connector->port = port;
+
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +466,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
- intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
- intel_connector->mst_port = intel_dp;
- intel_connector->port = port;
-
for_each_pipe(dev_priv, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f8dc84b2d2d3..8b298e5f012d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1646,7 +1646,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-u16 skl_scaler_calc_phase(int sub, bool chroma_center);
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
u32 pixel_format);
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 648a13c6043c..9a8018130237 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector = to_intel_connector(connector);
- if (intel_connector->encoder->hpd_pin == pin) {
+ /* Don't check MST ports, they don't have pins */
+ if (!intel_connector->mst_port &&
+ intel_connector->encoder->hpd_pin == pin) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
connector->name);
@@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
struct intel_encoder *encoder;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
+ u32 long_hpd_pulse_mask = 0;
+ u32 short_hpd_pulse_mask = 0;
+ enum hpd_pin pin;
if (!pin_mask)
return;
spin_lock(&dev_priv->irq_lock);
+
+ /*
+ * Determine whether ->hpd_pulse() exists for each pin, and
+ * whether we have a short or a long pulse. This is needed
+ * as each pin may have up to two encoders (HDMI and DP) and
+ * only the one of them (DP) will have ->hpd_pulse().
+ */
for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum hpd_pin pin = encoder->hpd_pin;
bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+ enum port port = encoder->port;
+ bool long_hpd;
+ pin = encoder->hpd_pin;
if (!(BIT(pin) & pin_mask))
continue;
- if (has_hpd_pulse) {
- bool long_hpd = long_mask & BIT(pin);
- enum port port = encoder->port;
+ if (!has_hpd_pulse)
+ continue;
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
- long_hpd ? "long" : "short");
- /*
- * For long HPD pulses we want to have the digital queue happen,
- * but we still want HPD storm detection to function.
- */
- queue_dig = true;
- if (long_hpd) {
- dev_priv->hotplug.long_port_mask |= (1 << port);
- } else {
- /* for short HPD just trigger the digital queue */
- dev_priv->hotplug.short_port_mask |= (1 << port);
- continue;
- }
+ long_hpd = long_mask & BIT(pin);
+
+ DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ long_hpd ? "long" : "short");
+ queue_dig = true;
+
+ if (long_hpd) {
+ long_hpd_pulse_mask |= BIT(pin);
+ dev_priv->hotplug.long_port_mask |= BIT(port);
+ } else {
+ short_hpd_pulse_mask |= BIT(pin);
+ dev_priv->hotplug.short_port_mask |= BIT(port);
}
+ }
+
+ /* Now process each pin just once */
+ for_each_hpd_pin(pin) {
+ bool long_hpd;
+
+ if (!(BIT(pin) & pin_mask))
+ continue;
if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
/*
@@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
continue;
- if (!has_hpd_pulse) {
+ /*
+ * Delegate to ->hpd_pulse() if one of the encoders for this
+ * pin has it, otherwise let the hotplug_work deal with this
+ * pin directly.
+ */
+ if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
+ long_hpd = long_hpd_pulse_mask & BIT(pin);
+ } else {
dev_priv->hotplug.event_bits |= BIT(pin);
+ long_hpd = true;
queue_hp = true;
}
+ if (!long_hpd)
+ continue;
+
if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
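Note on the intel_hotplug.c rework above: the handler now runs in two passes, first classifying each pin into long/short pulse masks while walking the encoders, then visiting every pin exactly once. A self-contained sketch of that shape, with made-up pin numbers and encoder table:

#include <stdio.h>

#define NUM_PINS 8

struct encoder { int pin; int has_hpd_pulse; };

static void hpd_irq(const struct encoder *enc, int n,
		    unsigned int pin_mask, unsigned int long_mask)
{
	unsigned int long_pulse = 0, short_pulse = 0;
	int i, pin;

	for (i = 0; i < n; i++) {		/* pass 1: classify pulses */
		pin = enc[i].pin;
		if (!(pin_mask & (1u << pin)) || !enc[i].has_hpd_pulse)
			continue;
		if (long_mask & (1u << pin))
			long_pulse |= 1u << pin;
		else
			short_pulse |= 1u << pin;
	}

	for (pin = 0; pin < NUM_PINS; pin++) {	/* pass 2: one visit per pin */
		if (!(pin_mask & (1u << pin)))
			continue;
		if ((long_pulse | short_pulse) & (1u << pin))
			printf("pin %d: %s pulse -> hpd_pulse handler\n", pin,
			       (long_pulse & (1u << pin)) ? "long" : "short");
		else
			printf("pin %d: no pulse handler, queue hotplug\n", pin);
	}
}

int main(void)
{
	/* two encoders (HDMI + DP) sharing pin 2, only DP has ->hpd_pulse() */
	const struct encoder enc[] = { { 2, 0 }, { 2, 1 }, { 4, 0 } };

	hpd_irq(enc, 3, (1u << 2) | (1u << 4), 1u << 2);
	return 0;
}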
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index cdf19553ffac..5d5336fbe7b0 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
-}
+ dev_priv->lpe_audio.irq = -1;
+ dev_priv->lpe_audio.platdev = NULL;
+}
/**
* intel_lpe_audio_notify() - notify lpe audio event
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 43957bb37a42..37c94a54efcb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq)
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
- /* True 32b PPGTT with dynamic page allocation: update PDP
+ /*
+ * True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
@@ -432,6 +433,17 @@ static u64 execlists_update_context(struct i915_request *rq)
if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+ * Ostensibly, writes (including the WCB) should be flushed prior to
+ * an uncached write such as our mmio register access, the empirical
+ * evidence (esp. on Braswell) suggests that the WC write into memory
+ * may not be visible to the HW prior to the completion of the UC
+ * register write and that we may begin execution from the context
+ * before its image is complete leading to invalid PD chasing.
+ */
+ wmb();
return ce->lrc_desc;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db9b8328275..245f0022bcfd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2881,8 +2881,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
* any underrun. If not able to get Dimm info assume 16GB dimm
* to avoid any underrun.
*/
- if (!dev_priv->dram_info.valid_dimm ||
- dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.is_16gb_dimm)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d0ef50bf930a..187bb0ceb4ac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -91,6 +91,7 @@ static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
+ int i;
/*
* read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
cmd |= MI_INVALIDATE_ISP;
}
- cs = intel_ring_begin(rq, 2);
+ i = 2;
+ if (mode & EMIT_INVALIDATE)
+ i += 20;
+
+ cs = intel_ring_begin(rq, i);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
- *cs++ = MI_NOOP;
+
+ /*
+ * A random delay to let the CS invalidate take effect? Without this
+ * delay, the GPU relocation path fails as the CS does not see
+ * the updated contents. Just as important, if we apply the flushes
+ * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+ * write and before the invalidate on the next batch), the relocations
+ * still fail. This implies that there is a delay following invalidation
+ * that is required to reset the caches as opposed to a delay to
+ * ensure the memory is written.
+ */
+ if (mode & EMIT_INVALIDATE) {
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = i915_ggtt_offset(rq->engine->scratch) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ for (i = 0; i < 12; i++)
+ *cs++ = MI_FLUSH;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = i915_ggtt_offset(rq->engine->scratch) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ *cs++ = cmd;
+
intel_ring_advance(rq, cs);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 0fdabce647ab..44e4491a4918 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2749,6 +2749,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
.name = "power well 2",
.domains = ICL_PW_2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
@@ -2760,12 +2766,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
@@ -3176,8 +3176,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u32 val;
+ const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
bool ret;
if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3187,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
if (req_slices == hw_enabled_slices || req_slices == 0)
return;
- val = I915_READ(DBUF_CTL_S2);
if (req_slices > hw_enabled_slices)
ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
else
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 5fd2f7bf3927..d3090a7537bb 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -302,13 +302,65 @@ skl_plane_max_stride(struct intel_plane *plane,
return min(8192 * cpp, 32768);
}
+static void
+skl_program_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+ int hscale, vscale;
+
+ hscale = drm_rect_calc_hscale(&plane_state->base.src,
+ &plane_state->base.dst,
+ 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&plane_state->base.src,
+ &plane_state->base.dst,
+ 0, INT_MAX);
+
+ /* TODO: handle sub-pixel coordinates */
+ if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
+ y_hphase = skl_scaler_calc_phase(1, hscale, false);
+ y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+ }
+
+ I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+ I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+ I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+}
+
void
skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl;
@@ -318,8 +370,6 @@ skl_update_plane(struct intel_plane *plane,
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
@@ -329,8 +379,6 @@ skl_update_plane(struct intel_plane *plane,
/* Sizes are 0 based */
src_w--;
src_h--;
- crtc_w--;
- crtc_h--;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -353,41 +401,8 @@ skl_update_plane(struct intel_plane *plane,
(plane_state->color_plane[1].y << 16) |
plane_state->color_plane[1].x);
- /* program plane scaler */
if (plane_state->scaler_id >= 0) {
- int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler =
- &crtc_state->scaler_state.scalers[scaler_id];
- u16 y_hphase, uv_rgb_hphase;
- u16 y_vphase, uv_rgb_vphase;
-
- /* TODO: handle sub-pixel coordinates */
- if (fb->format->format == DRM_FORMAT_NV12) {
- y_hphase = skl_scaler_calc_phase(1, false);
- y_vphase = skl_scaler_calc_phase(1, false);
-
- /* MPEG2 chroma siting convention */
- uv_rgb_hphase = skl_scaler_calc_phase(2, true);
- uv_rgb_vphase = skl_scaler_calc_phase(2, false);
- } else {
- /* not used */
- y_hphase = 0;
- y_vphase = 0;
-
- uv_rgb_hphase = skl_scaler_calc_phase(1, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, false);
- }
-
- I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
- I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
- I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
- ((crtc_w + 1) << 16)|(crtc_h + 1));
+ skl_program_scaler(plane, crtc_state, plane_state);
I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
} else {
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 8d03f64eabd7..5c22f2c8d4cf 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
err = igt_check_page_sizes(vma);
if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
- pr_err("page_sizes.gtt=%u, expected %lu\n",
+ pr_err("page_sizes.gtt=%u, expected %llu\n",
vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 8e2e269db97e..127d81513671 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != offset ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
offset, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 514245e69b38..acbbad3e322c 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -854,6 +854,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int sof_lines;
unsigned int vsync_lines;
+ /* Use VENCI for 480i and 576i and double HDMI pixels */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ hdmi_repeat = true;
+ use_enci = true;
+ venc_hdmi_latency = 1;
+ }
+
if (meson_venc_hdmi_supported_vic(vic)) {
vmode = meson_venc_hdmi_get_vic_vmode(vic);
if (!vmode) {
@@ -865,13 +872,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
} else {
meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
vmode = &vmode_dmt;
- }
-
- /* Use VENCI for 480i and 576i and double HDMI pixels */
- if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
- hdmi_repeat = true;
- use_enci = true;
- venc_hdmi_latency = 1;
+ use_enci = false;
}
/* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 394c129cfb3b..0a485c5b982e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5409,11 +5409,14 @@ static int dsi_probe(struct platform_device *pdev)
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data to 3 by default */
- if (dsi->data->quirks & DSI_QUIRK_GNQ)
+ if (dsi->data->quirks & DSI_QUIRK_GNQ) {
+ dsi_runtime_get(dsi);
/* NB_DATA_LANES */
dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
- else
+ dsi_runtime_put(dsi);
+ } else {
dsi->num_lanes_supported = 3;
+ }
r = dsi_init_output(dsi);
if (r)
@@ -5426,15 +5429,19 @@ static int dsi_probe(struct platform_device *pdev)
}
r = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (r)
+ if (r) {
DSSERR("Failed to populate DSI child devices: %d\n", r);
+ goto err_uninit_output;
+ }
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
- goto err_uninit_output;
+ goto err_of_depopulate;
return 0;
+err_of_depopulate:
+ of_platform_depopulate(dev);
err_uninit_output:
dsi_uninit_output(dsi);
err_pm_disable:
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev)
/* wait for current handler to finish before turning the DSI off */
synchronize_irq(dsi->irq);
- dispc_runtime_put(dsi->dss->dispc);
-
return 0;
}
static int dsi_runtime_resume(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(dsi->dss->dispc);
- if (r)
- return r;
dsi->is_enabled = true;
/* ensure the irq handler sees the is_enabled value */
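Note on the dsi_probe() error handling a few hunks above: the new err_of_depopulate label keeps the unwind strictly in reverse order of acquisition, so a component_add() failure also undoes of_platform_populate(). A compact standalone illustration of that goto-unwind idiom, with invented resource names:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(int fail_late)
{
	int ret;

	ret = acquire("output");
	if (ret)
		return ret;

	ret = acquire("children");		/* of_platform_populate() step */
	if (ret)
		goto err_uninit_output;

	ret = fail_late ? -1 : 0;		/* component_add() step */
	if (ret)
		goto err_depopulate;

	return 0;

err_depopulate:
	release("children");			/* of_platform_depopulate() */
err_uninit_output:
	release("output");
	return ret;
}

int main(void)
{
	return probe(1) ? 1 : 0;
}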
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 1aaf260aa9b8..7553c7fc1c45 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev)
dss);
/* Add all the child devices as components. */
+ r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (r)
+ goto err_uninit_debugfs;
+
omapdss_gather_components(&pdev->dev);
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
if (r)
- goto err_uninit_debugfs;
+ goto err_of_depopulate;
return 0;
+err_of_depopulate:
+ of_platform_depopulate(&pdev->dev);
+
err_uninit_debugfs:
dss_debugfs_remove_file(dss->debugfs.clk);
dss_debugfs_remove_file(dss->debugfs.dss);
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev)
{
struct dss_device *dss = platform_get_drvdata(pdev);
+ of_platform_depopulate(&pdev->dev);
+
component_master_del(&pdev->dev, &dss_component_ops);
dss_debugfs_remove_file(dss->debugfs.clk);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cf6230eac31a..aabdda394c9c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
hdmi->dss = dss;
- r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ r = hdmi_runtime_get(hdmi);
if (r)
return r;
+ r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ if (r)
+ goto err_runtime_put;
+
r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
if (r)
goto err_pll_uninit;
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
hdmi);
+ hdmi_runtime_put(hdmi);
+
return 0;
err_cec_uninit:
hdmi4_cec_uninit(&hdmi->core);
err_pll_uninit:
hdmi_pll_uninit(&hdmi->pll);
+err_runtime_put:
+ hdmi_runtime_put(hdmi);
return r;
}
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev)
return 0;
}
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dispc_runtime_put(hdmi->dss->dispc);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(hdmi->dss->dispc);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static const struct of_device_id hdmi_of_match[] = {
{ .compatible = "ti,omap4-hdmi", },
{},
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = {
.remove = hdmi4_remove,
.driver = {
.name = "omapdss_hdmi",
- .pm = &hdmi_pm_ops,
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index b0e4a7463f8c..9e8556f67a29 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev)
return 0;
}
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dispc_runtime_put(hdmi->dss->dispc);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(hdmi->dss->dispc);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static const struct of_device_id hdmi_of_match[] = {
{ .compatible = "ti,omap5-hdmi", },
{ .compatible = "ti,dra7-hdmi", },
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = {
.remove = hdmi5_remove,
.driver = {
.name = "omapdss_hdmi5",
- .pm = &hdmi_pm_ops,
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index ff0b18c8e4ac..b5f52727f8b1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev)
if (venc->tv_dac_clk)
clk_disable_unprepare(venc->tv_dac_clk);
- dispc_runtime_put(venc->dss->dispc);
-
return 0;
}
static int venc_runtime_resume(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
- int r;
-
- r = dispc_runtime_get(venc->dss->dispc);
- if (r < 0)
- return r;
if (venc->tv_dac_clk)
clk_prepare_enable(venc->tv_dac_clk);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 62928ec0e7db..caffc547ef97 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
int ret;
DBG("%s", omap_crtc->name);
+ priv->dispc_ops->runtime_get(priv->dispc);
+
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_on(crtc);
ret = drm_crtc_vblank_get(crtc);
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_off(crtc);
+
+ priv->dispc_ops->runtime_put(priv->dispc);
}
static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index af7dcb6da351..e7eb0d1e17be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -75,7 +75,7 @@ static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
@@ -88,7 +88,7 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index bf068da6b12e..f4a22689eb54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -135,7 +135,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
@@ -148,7 +148,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c78cd35a1294..f949287d926c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -491,7 +491,8 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_common(tcon, mode);
/* Set dithering if needed */
- sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+ if (tcon->panel)
+ sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -555,7 +556,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
struct drm_panel *panel = tcon->panel;
struct drm_connector *connector = panel->connector;
struct drm_display_info display_info = connector->display_info;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf2a18571d48..a132c37d7334 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
mutex_unlock(&vgasr_mutex);
return -EINVAL;
}
+ /* notify if GPU has been already bound */
+ if (ops->gpu_bound)
+ ops->gpu_bound(pdev, id);
}
mutex_unlock(&vgasr_mutex);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 56ccb1ea7da5..f2c681971201 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -224,6 +224,15 @@ config I2C_NFORCE2_S4985
This driver can also be built as a module. If so, the module
will be called i2c-nforce2-s4985.
+config I2C_NVIDIA_GPU
+ tristate "NVIDIA GPU I2C controller"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for the
+ NVIDIA GPU I2C controller which is used to communicate with the GPU's
+ Type-C controller. This driver can also be built as a module called
+ i2c-nvidia-gpu.
+
config I2C_SIS5595
tristate "SiS 5595"
depends on PCI
@@ -752,7 +761,7 @@ config I2C_OCORES
config I2C_OMAP
tristate "OMAP I2C adapter"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP || ARCH_K3
default y if MACH_OMAP_H3 || MACH_OMAP_OSK
help
If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 18b26af82b1c..5f0cb6915969 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_I2C_ISCH) += i2c-isch.o
obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
+obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
new file mode 100644
index 000000000000..8822357bca0c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nvidia GPU I2C controller Driver
+ *
+ * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+/* I2C definitions */
+#define I2C_MST_CNTL 0x00
+#define I2C_MST_CNTL_GEN_START BIT(0)
+#define I2C_MST_CNTL_GEN_STOP BIT(1)
+#define I2C_MST_CNTL_CMD_READ (1 << 2)
+#define I2C_MST_CNTL_CMD_WRITE (2 << 2)
+#define I2C_MST_CNTL_BURST_SIZE_SHIFT 6
+#define I2C_MST_CNTL_GEN_NACK BIT(28)
+#define I2C_MST_CNTL_STATUS GENMASK(30, 29)
+#define I2C_MST_CNTL_STATUS_OKAY (0 << 29)
+#define I2C_MST_CNTL_STATUS_NO_ACK (1 << 29)
+#define I2C_MST_CNTL_STATUS_TIMEOUT (2 << 29)
+#define I2C_MST_CNTL_STATUS_BUS_BUSY (3 << 29)
+#define I2C_MST_CNTL_CYCLE_TRIGGER BIT(31)
+
+#define I2C_MST_ADDR 0x04
+
+#define I2C_MST_I2C0_TIMING 0x08
+#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ 0x10e
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT 16
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX 255
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK BIT(24)
+
+#define I2C_MST_DATA 0x0c
+
+#define I2C_MST_HYBRID_PADCTL 0x20
+#define I2C_MST_HYBRID_PADCTL_MODE_I2C BIT(0)
+#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV BIT(14)
+#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV BIT(15)
+
+struct gpu_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct i2c_adapter adapter;
+ struct i2c_board_info *gpu_ccgx_ucsi;
+};
+
+static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
+{
+ u32 val;
+
+ /* enable I2C */
+ val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL);
+ val |= I2C_MST_HYBRID_PADCTL_MODE_I2C |
+ I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV;
+ writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL);
+
+ /* enable 100KHZ mode */
+ val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ;
+ val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX
+ << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT);
+ val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK;
+ writel(val, i2cd->regs + I2C_MST_I2C0_TIMING);
+}
+
+static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
+{
+ unsigned long target = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ do {
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
+ break;
+ if ((val & I2C_MST_CNTL_STATUS) !=
+ I2C_MST_CNTL_STATUS_BUS_BUSY)
+ break;
+ usleep_range(500, 600);
+ } while (time_is_after_jiffies(target));
+
+ if (time_is_before_jiffies(target)) {
+ dev_err(i2cd->dev, "i2c timeout error %x\n", val);
+ return -ETIME;
+ }
+
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ switch (val & I2C_MST_CNTL_STATUS) {
+ case I2C_MST_CNTL_STATUS_OKAY:
+ return 0;
+ case I2C_MST_CNTL_STATUS_NO_ACK:
+ return -EIO;
+ case I2C_MST_CNTL_STATUS_TIMEOUT:
+ return -ETIME;
+ default:
+ return 0;
+ }
+}
+
+static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
+{
+ int status;
+ u32 val;
+
+ val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ |
+ (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) |
+ I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK;
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ status = gpu_i2c_check_status(i2cd);
+ if (status < 0)
+ return status;
+
+ val = readl(i2cd->regs + I2C_MST_DATA);
+ switch (len) {
+ case 1:
+ data[0] = val;
+ break;
+ case 2:
+ put_unaligned_be16(val, data);
+ break;
+ case 3:
+ put_unaligned_be16(val >> 8, data);
+ data[2] = val;
+ break;
+ case 4:
+ put_unaligned_be32(val, data);
+ break;
+ default:
+ break;
+ }
+ return status;
+}
+
+static int gpu_i2c_start(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data)
+{
+ u32 val;
+
+ writel(data, i2cd->regs + I2C_MST_DATA);
+
+ val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT);
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap);
+ int status, status2;
+ int i, j;
+
+ /*
+ * The controller supports a maximum 4-byte read due to a known
+ * limitation of sending STOP after every read.
+ */
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD) {
+ /* program client address before starting read */
+ writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR);
+ /* gpu_i2c_read has implicit start */
+ status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len);
+ if (status < 0)
+ goto stop;
+ } else {
+ u8 addr = i2c_8bit_addr_from_msg(msgs + i);
+
+ status = gpu_i2c_start(i2cd);
+ if (status < 0) {
+ if (i == 0)
+ return status;
+ goto stop;
+ }
+
+ status = gpu_i2c_write(i2cd, addr);
+ if (status < 0)
+ goto stop;
+
+ for (j = 0; j < msgs[i].len; j++) {
+ status = gpu_i2c_write(i2cd, msgs[i].buf[j]);
+ if (status < 0)
+ goto stop;
+ }
+ }
+ }
+ status = gpu_i2c_stop(i2cd);
+ if (status < 0)
+ return status;
+
+ return i;
+stop:
+ status2 = gpu_i2c_stop(i2cd);
+ if (status2 < 0)
+ dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
+ return status;
+}
+
+static const struct i2c_adapter_quirks gpu_i2c_quirks = {
+ .max_read_len = 4,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static u32 gpu_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm gpu_i2c_algorithm = {
+ .master_xfer = gpu_i2c_master_xfer,
+ .functionality = gpu_i2c_functionality,
+};
+
+/*
+ * This driver is for Nvidia GPU cards with a USB Type-C interface.
+ * We want to identify the cards using the vendor ID and class code
+ * only, to avoid a dependency on adding a product ID for every new
+ * card that requires this driver.
+ * Currently there is no class code defined for a UCSI device over
+ * PCI, so the UNKNOWN class is used for now; it will be updated once
+ * UCSI over PCI gets a class code.
+ * There are no other NVIDIA cards with the UNKNOWN class code. Even
+ * if the driver gets loaded for an undesired card, eventually
+ * i2c_read() (initiated from the UCSI i2c_client) or the UCSI
+ * commands will time out.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
+static const struct pci_device_id gpu_i2c_ids[] = {
+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+
+static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+{
+ struct i2c_client *ccgx_client;
+
+ i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
+ sizeof(*i2cd->gpu_ccgx_ucsi),
+ GFP_KERNEL);
+ if (!i2cd->gpu_ccgx_ucsi)
+ return -ENOMEM;
+
+ strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi",
+ sizeof(i2cd->gpu_ccgx_ucsi->type));
+ i2cd->gpu_ccgx_ucsi->addr = 0x8;
+ i2cd->gpu_ccgx_ucsi->irq = irq;
+ ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+ if (!ccgx_client)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct gpu_i2c_dev *i2cd;
+ int status;
+
+ i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL);
+ if (!i2cd)
+ return -ENOMEM;
+
+ i2cd->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, i2cd);
+
+ status = pcim_enable_device(pdev);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status);
+ return status;
+ }
+
+ pci_set_master(pdev);
+
+ i2cd->regs = pcim_iomap(pdev, 0, 0);
+ if (!i2cd->regs) {
+ dev_err(&pdev->dev, "pcim_iomap failed\n");
+ return -ENOMEM;
+ }
+
+ status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status);
+ return status;
+ }
+
+ gpu_enable_i2c_bus(i2cd);
+
+ i2c_set_adapdata(&i2cd->adapter, i2cd);
+ i2cd->adapter.owner = THIS_MODULE;
+ strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ sizeof(i2cd->adapter.name));
+ i2cd->adapter.algo = &gpu_i2c_algorithm;
+ i2cd->adapter.quirks = &gpu_i2c_quirks;
+ i2cd->adapter.dev.parent = &pdev->dev;
+ status = i2c_add_adapter(&i2cd->adapter);
+ if (status < 0)
+ goto free_irq_vectors;
+
+ status = gpu_populate_client(i2cd, pdev->irq);
+ if (status < 0) {
+ dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status);
+ goto del_adapter;
+ }
+
+ return 0;
+
+del_adapter:
+ i2c_del_adapter(&i2cd->adapter);
+free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return status;
+}
+
+static void gpu_i2c_remove(struct pci_dev *pdev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev);
+
+ i2c_del_adapter(&i2cd->adapter);
+ pci_free_irq_vectors(pdev);
+}
+
+static int gpu_i2c_resume(struct device *dev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
+
+ gpu_enable_i2c_bus(i2cd);
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL);
+
+static struct pci_driver gpu_i2c_driver = {
+ .name = "nvidia-gpu",
+ .id_table = gpu_i2c_ids,
+ .probe = gpu_i2c_probe,
+ .remove = gpu_i2c_remove,
+ .driver = {
+ .pm = &gpu_i2c_driver_pm,
+ },
+};
+
+module_pci_driver(gpu_i2c_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver");
+MODULE_LICENSE("GPL v2");
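For context, client transfers on this adapter are shaped by the quirks above: reads are capped at four bytes and combined transactions must be a write followed by a read. A minimal sketch of such a transfer follows; it is a hypothetical illustration rather than part of the driver, the register offset 0x04 and the helper name are invented, and it assumes the headers the driver already pulls in (<linux/i2c.h>, <asm/unaligned.h>):

	static int example_gpu_i2c_read_reg16(struct i2c_adapter *adap, u16 *out)
	{
		u8 reg = 0x04;	/* hypothetical register offset, for illustration only */
		u8 buf[2];
		struct i2c_msg msgs[] = {
			{ .addr = 0x08, .flags = 0, .len = 1, .buf = &reg },
			/* read length must stay within max_read_len (4 bytes) */
			{ .addr = 0x08, .flags = I2C_M_RD, .len = 2, .buf = buf },
		};
		int ret;

		ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
		if (ret < 0)
			return ret;

		/* gpu_i2c_read() fills the buffer MSB first for multi-byte reads */
		*out = get_unaligned_be16(buf);
		return 0;
	}

The 0x08 client address mirrors the ccgx-ucsi board info registered in gpu_populate_client().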
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 527f55c8c4c7..db075bc0d952 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -571,18 +571,19 @@ static int geni_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
- ret = i2c_add_adapter(&gi2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
- return ret;
- }
-
gi2c->suspended = 1;
pm_runtime_set_suspended(gi2c->se.dev);
pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
pm_runtime_use_autosuspend(gi2c->se.dev);
pm_runtime_enable(gi2c->se.dev);
+ ret = i2c_add_adapter(&gi2c->adap);
+ if (ret) {
+ dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ pm_runtime_disable(gi2c->se.dev);
+ return ret;
+ }
+
return 0;
}
@@ -590,8 +591,8 @@ static int geni_i2c_remove(struct platform_device *pdev)
{
struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
- pm_runtime_disable(gi2c->se.dev);
i2c_del_adapter(&gi2c->adap);
+ pm_runtime_disable(gi2c->se.dev);
return 0;
}
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index ce7acd115dd8..1870cf87afe1 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -75,8 +75,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
{
struct pattern_trig_data *data = from_timer(data, t, timer);
- mutex_lock(&data->lock);
-
for (;;) {
if (!data->is_indefinite && !data->repeat)
break;
@@ -87,9 +85,10 @@ static void pattern_trig_timer_function(struct timer_list *t)
data->curr->brightness);
mod_timer(&data->timer,
jiffies + msecs_to_jiffies(data->curr->delta_t));
-
- /* Skip the tuple with zero duration */
- pattern_trig_update_patterns(data);
+ if (!data->next->delta_t) {
+ /* Skip the tuple with zero duration */
+ pattern_trig_update_patterns(data);
+ }
/* Select next tuple */
pattern_trig_update_patterns(data);
} else {
@@ -116,8 +115,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
break;
}
-
- mutex_unlock(&data->lock);
}
static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
@@ -176,14 +173,10 @@ static ssize_t repeat_store(struct device *dev, struct device_attribute *attr,
if (res < -1 || res == 0)
return -EINVAL;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
@@ -234,14 +227,10 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
struct pattern_trig_data *data = led_cdev->trigger_data;
int ccount, cr, offset = 0, err = 0;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index e514d57a0419..aa983422aa97 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
- select BCH_CONST_PARAMS
+ select BCH_CONST_PARAMS if !MTD_NAND_BCH
select BITREVERSE
help
This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 784c6e1a0391..fd5fe12d7461 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -221,7 +221,14 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = info->subdev[0].mtd;
ret = 0;
} else if (info->num_subdev > 1) {
- struct mtd_info *cdev[nr];
+ struct mtd_info **cdev;
+
+ cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/*
* We detected multiple devices. Concatenate them together.
*/
@@ -230,6 +237,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = mtd_concat_create(cdev, info->num_subdev,
plat->name);
+ kfree(cdev);
if (info->mtd == NULL) {
ret = -ENXIO;
goto err;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 05bd0779fe9b..71050a0b31df 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -590,7 +590,6 @@ retry:
/**
* panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
* @chip: NAND chip structure
* @timeo: timeout
*
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index e24db817154e..d846428ef038 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -996,7 +996,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
err_unmap:
dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
- return 0;
+ return ret;
}
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 9407ca5f9443..3e54e31889c7 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -3250,12 +3250,14 @@ static int spi_nor_init_params(struct spi_nor *nor,
memcpy(&sfdp_params, params, sizeof(sfdp_params));
memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
- if (spi_nor_parse_sfdp(nor, &sfdp_params))
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
/* restore previous erase map */
memcpy(&nor->erase_map, &prev_map,
sizeof(nor->erase_map));
- else
+ } else {
memcpy(params, &sfdp_params, sizeof(*params));
+ }
}
return 0;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d721ccf7d8be..38e399e0f30e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
if (err)
return err;
+ /* Keep the histogram mode bits */
+ val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 6a633c70f603..99ef1daaa4d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 fc = aq_nic->aq_nic_cfg.flow_control;
pause->autoneg = 0;
- if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
- pause->rx_pause = 1;
- if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
- pause->tx_pause = 1;
+ pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+ pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
+
}
static int aq_ethtool_set_pauseparam(struct net_device *ndev,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index e8689241204e..a1e70da358ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -204,6 +204,10 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+ int (*hw_set_offload)(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+ int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
};
struct aq_fw_ops {
@@ -226,6 +230,8 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
+ u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
+
int (*set_flow_control)(struct aq_hw_s *self);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e3ae29e523f0..7c07eef275eb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
bool is_lro = false;
+ int err = 0;
+
+ aq_cfg->features = features;
- if (aq_cfg->hw_features & NETIF_F_LRO) {
+ if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
is_lro = features & NETIF_F_LRO;
if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
}
}
}
+ if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+ err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+ aq_cfg);
- return 0;
+ return err;
}
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 5fed24446687..7abdc0952425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
}
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
- cfg->hw_features = cfg->aq_hw_caps->hw_features;
+ cfg->features = cfg->aq_hw_caps->hw_features;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+ u32 fc = 0;
if (err)
return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
+
+ /* Driver has to update flow control settings on RX block
+ * on any link event.
+ * We should query FW whether it negotiated FC.
+ */
+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ if (self->aq_hw_ops->hw_set_fc)
+ self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
self->link_status = self->aq_hw->aq_link_status;
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
}
}
- if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+ if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
packet_filter |= IFF_MULTICAST;
self->mc_list.count = i;
self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ /* Asym is when either RX or TX, but not both */
+ if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index c1582f4e8e1b..44ec47a3d60a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -23,7 +23,7 @@ struct aq_vec_s;
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
- u64 hw_features;
+ u64 features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
u32 vecs; /* vecs==allocated irqs */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3db91446cc67..74550ccc7a20 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
return !!budget;
}
+static void aq_rx_checksum(struct aq_ring_s *self,
+ struct aq_ring_buff_s *buff,
+ struct sk_buff *skb)
+{
+ if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(buff->is_cso_err)) {
+ ++self->stats.rx.errors;
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
skb->protocol = eth_type_trans(skb, ndev);
- if (unlikely(buff->is_cso_err)) {
- ++self->stats.rx.errors;
- skb->ip_summed = CHECKSUM_NONE;
- } else {
- if (buff->is_ip_cso) {
- __skb_incr_checksum_unnecessary(skb);
- if (buff->is_udp_cso || buff->is_tcp_cso)
- __skb_incr_checksum_unnecessary(skb);
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+
+ aq_rx_checksum(self, buff, skb);
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 76d25d594a0f..f02592f43fe3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
return err;
}
+static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+ return 0;
+}
+
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
- bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
- hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+ NETIF_F_RXCSUM));
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+ NETIF_F_RXCSUM));
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
- unsigned int is_err = 1U;
unsigned int is_rx_check_sum_enabled = 0U;
unsigned int pkt_type = 0U;
+ u8 rx_stat = 0U;
if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
break;
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->hw_head];
- is_err = (0x0000003CU & rxd_wb->status);
+ rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
- is_err &= ~0x20U; /* exclude validity bit */
pkt_type = 0xFFU & (rxd_wb->type >> 4);
- if (is_rx_check_sum_enabled) {
- if (0x0U == (pkt_type & 0x3U))
- buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+ if (is_rx_check_sum_enabled & BIT(0) &&
+ (0x0U == (pkt_type & 0x3U)))
+ buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
+ if (is_rx_check_sum_enabled & BIT(1)) {
if (0x4U == (pkt_type & 0x1CU))
- buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+ buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
else if (0x0U == (pkt_type & 0x1CU))
- buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
-
- /* Checksum offload workaround for small packets */
- if (rxd_wb->pkt_len <= 60) {
- buff->is_ip_cso = 0U;
- buff->is_cso_err = 0U;
- }
+ buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
+ }
+ buff->is_cso_err = !!(rx_stat & 0x6);
+ /* Checksum offload workaround for small packets */
+ if (unlikely(rxd_wb->pkt_len <= 60)) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
}
-
- is_err &= ~0x18U;
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
- if (is_err || rxd_wb->type & 0x1000U) {
- /* status error or DMA error */
+ if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+ /* MAC error or DMA error */
buff->is_error = 1U;
} else {
if (self->aq_nic_cfg->is_rss) {
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+ .hw_set_fc = hw_atl_b0_set_fc,
};
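In hw_atl_b0_hw_ring_rx_receive() above, rx_stat is (rxd_wb->status & 0x3C) >> 2; the bit meanings below are inferred from how the new code consumes them and are not named in the driver itself:

	rx_stat bit 0  ->  treated as a MAC error (sets buff->is_error, together with the DMA error bit)
	rx_stat bit 1  ->  IPv4 checksum error (clears buff->is_ip_cso)
	rx_stat bit 2  ->  L4 checksum error (clears is_tcp_cso / is_udp_cso)
	rx_stat bit 3  ->  L4 checksum computed (sets is_tcp_cso / is_udp_cso when bit 2 is clear)
	buff->is_cso_err = !!(rx_stat & 0x6), i.e. either checksum-error bit set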
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index be0a3a90dfad..5502ec5f0f69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+ init);
+}
+
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 7056c7342afc..41f239928c15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
+/* set rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+
/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 716674a9b729..a715fa317b1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -293,6 +293,24 @@
/* default value of bitfield desc{d}_reset */
#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 096ca5730887..7de3220d9cab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -30,6 +30,8 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
@@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
return 0;
}
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+
+ if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode = AQ_NIC_FC_RX;
+ else
+ *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
+ else
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode = AQ_NIC_FC_TX;
+ else
+ *fcmode = 0;
+
+ return 0;
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
.set_flow_control = aq_fw2x_set_flow_control,
+ .get_flow_control = aq_fw2x_get_flow_control
};
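For readability, the nested conditionals in aq_fw2x_get_flow_control() above decode the two firmware capability bits as follows (a summary derived from the code, not additional driver logic):

	CAP_PAUSE=1, CAP_ASYM_PAUSE=1  ->  *fcmode = AQ_NIC_FC_RX
	CAP_PAUSE=1, CAP_ASYM_PAUSE=0  ->  *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX
	CAP_PAUSE=0, CAP_ASYM_PAUSE=1  ->  *fcmode = AQ_NIC_FC_TX
	CAP_PAUSE=0, CAP_ASYM_PAUSE=0  ->  *fcmode = 0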
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3f96aa30068e..20fcf0d1c2ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF)) {
ret = hns3_restore_vlan(netdev);
- return ret;
+ if (ret)
+ return ret;
}
ret = hns3_restore_fd_rules(netdev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7893beffcc71..c9d5d0a7fbf1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1545,7 +1545,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
- if (adapter->vlan_header_insertion) {
+ if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bc71a21c1dc2..21c2688d6308 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_SCTP_CRC |
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
hw_features = hw_enc_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
netdev->hw_features |= hw_features;
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4c4b5717a627..b8548370f1c7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+#define ICE_MAX_RESET_WAIT 20
+
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
u64 tx_linearize;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
unsigned int current_netdev_flags;
u32 tx_restart;
u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 8cd6a2401fd9..554fd707a6d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_shutdown_all_ctrlq(hw);
+
+ /* Clear VSI contexts if not already cleared */
+ ice_clear_all_vsi_ctx(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 96923580f2a6..648acdb4c644 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
}
if (!test_bit(__ICE_DOWN, pf->state)) {
- /* Give it a little more time to try to come back */
+ /* Give it a little more time to try to come back. If still
+ * down, restart autoneg link or reinitialize the interface.
+ */
msleep(75);
if (!test_bit(__ICE_DOWN, pf->state))
return ice_nway_reset(netdev);
+
+ ice_down(vsi);
+ ice_up(vsi);
}
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 5fdea6ec7675..596b9fb1c510 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -242,6 +242,8 @@
#define GLNVM_ULD 0x000B6008
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLPCI_CNF2 0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5bacad01f0c9..1041fa2a7767 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
- ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
vsi->back->hw.adminq.sq_last_status);
goto err_out;
}
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
* on this wq
*/
if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ ice_napi_del(vsi);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 05993451147a..333312a1d595 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1465,7 +1465,7 @@ skip_req_irq:
* ice_napi_del - Remove NAPI handler for the VSI
* @vsi: VSI for which NAPI handler is to be removed
*/
-static void ice_napi_del(struct ice_vsi *vsi)
+void ice_napi_del(struct ice_vsi *vsi)
{
int v_idx;
@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
+ int ret = ice_cfg_vlan_pruning(vsi, true);
+
if (ret)
return ret;
}
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
- ret = ice_vsi_add_vlan(vsi, vid);
-
- if (!ret)
- set_bit(vid, vsi->active_vlans);
-
- return ret;
+ return ice_vsi_add_vlan(vsi, vid);
}
/**
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
if (status)
return status;
- clear_bit(vid, vsi->active_vlans);
-
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
status = ice_cfg_vlan_pruning(vsi, false);
@@ -2002,6 +1995,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
+ * @pf: pointer to the PF structure
+ *
+ * There is no error returned here because the driver should be able to handle
+ * 128 Byte cache lines, so we only print a warning in case issues are seen,
+ * specifically with Tx.
+ */
+static void ice_verify_cacheline_size(struct ice_pf *pf)
+{
+ if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
+ dev_warn(&pf->pdev->dev,
+ "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+ ICE_CACHE_LINE_BYTES);
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ ice_verify_cacheline_size(pf);
+
return 0;
err_alloc_sw_unroll:
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
if (!pf)
return;
+ for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
+ if (!ice_is_reset_in_progress(pf->state))
+ break;
+ msleep(100);
+ }
+
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
@@ -2510,31 +2527,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
}
/**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
- int err;
- u16 vid;
-
- if (!vsi->netdev)
- return -EINVAL;
-
- err = ice_vsi_vlan_setup(vsi);
- if (err)
- return err;
-
- for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
- err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
* ice_vsi_cfg - Setup the VSI
* @vsi: the VSI being configured
*
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
- err = ice_restore_vlan(vsi);
+
+ err = ice_vsi_vlan_setup(vsi);
+
if (err)
return err;
}
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
- int err;
+ int err, i;
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
}
ice_reset_all_vfs(pf, true);
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ bool link_up;
+
+ if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
+ continue;
+ ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+ if (link_up) {
+ netif_carrier_on(pf->vsi[i]->netdev);
+ netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+ } else {
+ netif_carrier_off(pf->vsi[i]->netdev);
+ netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+ }
+ }
+
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 33403f39f1b3..40c9c6558956 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -348,6 +348,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the hw struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_MAX_VSI; i++)
+ ice_clear_vsi_ctx(hw, i);
+}
+
+/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
* @hw: pointer to the hw struct
* @vsi_handle: unique VSI handle provided by drivers
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index b88d96a1ef69..d5ef0bd58bf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
+/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5dae968d853e..fe5bbabbb41e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
- first->bytecount = (first->gso_segs - 1) * off->header_len;
+ first->bytecount += (first->gso_segs - 1) * off->header_len;
cd_tso_len = skb->len - off->header_len;
cd_mss = skb_shinfo(skb)->gso_size;
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
- * return (((size >> 12) * 85) >> 8) + 1;
+ * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
*
* Since multiplication and division are commutative, we can reorder
* operations into:
- * return ((size * 85) >> 20) + 1;
+ * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
*/
static unsigned int ice_txd_use_count(unsigned int size)
{
- return ((size * 85) >> 20) + 1;
+ return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
/**
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
+ ICE_DESCS_FOR_CTX_DESC)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
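As a quick numeric check of the approximation in ice_txd_use_count() above (illustrative arithmetic only): 85 / 2^20 is roughly 1 / 12336, so the helper rounds the buffer size up in roughly 12 KB steps, deliberately more conservative than the per-descriptor data limit. With ICE_DESCS_FOR_SKB_DATA_PTR equal to 1:

	size = 12288 (12 KB):  ((12288 * 85) >> 20) + 1 = 0 + 1 = 1 descriptor
	size = 65536 (64 KB):  ((65536 * 85) >> 20) + 1 = 5 + 1 = 6 descriptors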
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1d0f58bd389b..75d0eaf6c9dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -22,8 +22,21 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* We are assuming that the cache line is always 64 Bytes here for ice.
+ * In order to make sure that is a correct assumption there is a check in probe
+ * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+ * size is 128 bytes. We do it this way because we do not want to read the
+ * GLPCI_CNF2 register or a variable containing the value on every pass through
+ * the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES 64
+#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
+ sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC 1
+#define ICE_DESCS_FOR_SKB_DATA_PTR 1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+ ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
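Assuming struct ice_tx_desc is 16 bytes (two 64-bit words), which is the layout these constants appear to rely on, the new macros work out as follows (illustrative arithmetic only):

	ICE_DESCS_PER_CACHE_LINE = 64 / sizeof(struct ice_tx_desc) = 64 / 16 = 4
	DESC_NEEDED              = MAX_SKB_FRAGS + 1 + 4 + 1 = MAX_SKB_FRAGS + 6
	                           (previously MAX_SKB_FRAGS + 4)
	ice_maybe_stop_tx() in ice_xmit_frame_ring() still reserves count + 4 + 1,
	matching the old literal "count + 4 + 1" but spelled with named constants.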
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 12f9432abf11..f4dbc81c1988 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -92,12 +92,12 @@ struct ice_link_status {
u64 phy_type_low;
u16 max_frame_size;
u16 link_speed;
+ u16 req_speeds;
u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
u8 ext_info;
u8 pacing;
- u8 req_speeds;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 45f10f8f01dc..e71065f9d391 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
struct ice_vsi_ctx ctxt = { 0 };
enum ice_status status;
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ICE_AQ_VSI_PVLAN_INSERT_PVID |
ICE_AQ_VSI_VLAN_EMOD_STR;
ctxt.info.pvid = cpu_to_le16(vid);
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_vsi_add_vlan(vsi, vid)) {
vf->num_vlan++;
- set_bit(vid, vsi->active_vlans);
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid))
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
if (!ice_vsi_kill_vlan(vsi, vid)) {
vf->num_vlan--;
- clear_bit(vid, vsi->active_vlans);
/* Disable VLAN pruning when removing VLAN 0 */
if (unlikely(!vid))
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 29ced6b74d36..2b95dc9c7a6a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -53,13 +53,15 @@
* 2^40 * 10^-9 / 60 = 18.3 minutes.
*
* SYSTIM is converted to real time using a timecounter. As
- * timecounter_cyc2time() allows old timestamps, the timecounter
- * needs to be updated at least once per half of the SYSTIM interval.
- * Scheduling of delayed work is not very accurate, so we aim for 8
- * minutes to be sure the actual interval is shorter than 9.16 minutes.
+ * timecounter_cyc2time() allows old timestamps, the timecounter needs
+ * to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, and also the NIC
+ * clock can be adjusted to run up to 6% faster and the system clock
+ * up to 10% slower, so we aim for 6 minutes to be sure the actual
+ * interval in the NIC time is shorter than 9.16 minutes.
*/
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
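Using only the figures in the comment above, the margin the shorter period buys can be sanity-checked roughly (delayed-work scheduling slop ignored): dividing by 0.9 models the system clock running up to 10% slow, multiplying by 1.06 models the NIC clock running up to 6% fast.

	8 min scheduled  ->  ~8 / 0.9 * 1.06 ≈ 9.4 min of NIC time, over the ~9.16 min budget
	6 min scheduled  ->  ~6 / 0.9 * 1.06 ≈ 7.1 min of NIC time, comfortably under the budget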
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bfd349bf41a..3ba672e9e353 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -494,7 +494,7 @@ struct mvneta_port {
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
u32 command; /* Options used by HW for packet transmitting.*/
- u16 reserverd1; /* csum_l4 (for future use) */
+ u16 reserved1; /* csum_l4 (for future use) */
u16 data_size; /* Data size of transmitted packet in bytes */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */
u32 reserved2; /* hw_cmd - (for future use, PMT) */
@@ -519,7 +519,7 @@ struct mvneta_rx_desc {
#else
struct mvneta_tx_desc {
u16 data_size; /* Data size of transmitted packet in bytes */
- u16 reserverd1; /* csum_l4 (for future use) */
+ u16 reserved1; /* csum_l4 (for future use) */
u32 command; /* Options used by HW for packet transmitting.*/
u32 reserved2; /* hw_cmd - (for future use, PMT) */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cc1b373c0ace..46dc93d3b9b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
fcoe_pf_params->num_cqs,
p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
- return -EINVAL;
+ rc = -EINVAL;
+ goto err;
}
p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
if (rc)
- return rc;
+ goto err;
cxt_info.iid = dummy_cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc) {
DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
dummy_cid);
- return rc;
+ goto err;
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
return rc;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 1135387bd99d..4f8a685d1a55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 82a1bd1f8a8c..67c02ea93906 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
- /* Return spq entry which is taken in qed_sp_init_request()*/
- qed_spq_return_entry(p_hwfn, p_ent);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"%d is not supported yet\n",
p_filter_cmd->opcode);
+ qed_sp_destroy_request(p_hwfn, *pp_ent);
return -EINVAL;
}
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
} else {
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
- return rc;
+ goto err;
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
&abs_rx_q_id);
if (rc)
- return rc;
+ goto err;
p_ramrod->rx_qid_valid = 1;
p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
(u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
}
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c71391b9c757..62113438c880 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
default:
rc = -EINVAL;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
SET_FIELD(p_ramrod->flags1,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f9167d1354bb..e49fada85410 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index e95431f6acd4..3157c0d99441 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -167,6 +167,9 @@ struct qed_spq_entry {
enum spq_mode comp_mode;
struct qed_spq_comp_cb comp_cb;
struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+
+ /* Posted entry for unlimited list entry in EBLOCK mode */
+ struct qed_spq_entry *post_ent;
};
struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
struct qed_spq_comp_cb *p_comp_data;
};
+/**
+ * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
+ * Should be called in error flows after initializing the SPQ entry
+ * and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 77b6248ad3b9..888274fa208b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -47,6 +47,19 @@
#include "qed_sp.h"
#include "qed_sriov.h"
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ /* qed_spq_get_entry() can either get an entry from the free_pool,
+ * or, if no entries are left, allocate a new entry and add it to
+ * the unlimited_pending list.
+ */
+ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+ kfree(p_ent);
+ else
+ qed_spq_return_entry(p_hwfn, p_ent);
+}
+
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
case QED_SPQ_MODE_BLOCK:
if (!p_data->p_comp_data)
- return -EINVAL;
+ goto err;
p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
default:
DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
- return -EINVAL;
+ goto err;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+
+ return -EINVAL;
}
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index c4a6274dd625..0a9c5bb0fa48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
- goto out;
+ return 0;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- if (comp_done->done == 1)
+ if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
-out:
- qed_ptt_release(p_hwfn, p_ptt);
- return 0;
-
+ return 0;
+ }
err:
- qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
/* EBLOCK responsible to free the allocated p_ent */
if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
kfree(p_ent);
+ else
+ p_ent->post_ent = p_en2;
p_ent = p_en2;
}
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ __set_bit(pos, p_spq->p_comp_bitmap);
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
+ p_spq->comp_bitmap_idx++;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+}
+
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) {
- /* This is an allocated p_ent which does not need to
- * return to pool.
- */
+ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
kfree(p_ent);
- return rc;
+
+ /* Return the entry which was actually posted */
+ p_ent = p_post_ent;
}
if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spq_post_fail2:
spin_lock_bh(&p_spq->lock);
list_del(&p_ent->list);
- qed_chain_return_produced(&p_spq->chain);
+ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
spq_post_fail:
/* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
- u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
list_del(&p_ent->list);
-
- /* Avoid overriding of SPQ entries when getting
- * out-of-order completions, by marking the completions
- * in a bitmap and increasing the chain consumer only
- * for the first successive completed entries.
- */
- __set_bit(pos, p_spq->p_comp_bitmap);
-
- while (test_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap)) {
- __clear_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap);
- p_spq->comp_bitmap_idx++;
- qed_chain_return_produced(&p_spq->chain);
- }
-
+ qed_spq_comp_bmap_update(p_hwfn, echo);
p_spq->comp_count++;
found = p_ent;
break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
QED_MSG_SPQ,
"Got a completion without a callback function\n");
- if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
- (found->queue == &p_spq->unlimited_pending))
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
/* EBLOCK is responsible for returning its own entry into the
- * free list, unless it originally added the entry into the
- * unlimited pending list.
+ * free list.
*/
qed_spq_return_entry(p_hwfn, found);
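The helper factored out above, qed_spq_comp_bmap_update(), is an instance of a general ring technique: mark each completed slot in a bitmap and advance the consumer index only across the contiguous run of completed slots, so out-of-order completions never release ring space early. A minimal, self-contained sketch of the same idea in plain C, with a hypothetical ring_consume() standing in for qed_chain_return_produced():

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16U			/* ring depth; one bitmap bit per slot */

static uint32_t comp_bitmap;		/* completion marks, RING_SIZE <= 32 */
static unsigned int comp_idx;		/* next slot expected to complete */

/* Placeholder for the real "return one element to the chain" call. */
static void ring_consume(unsigned int slot)
{
	printf("consumer advanced past slot %u\n", slot);
}

/* Mark 'echo' completed; advance only across the contiguous completed prefix. */
static void comp_bmap_update(uint16_t echo)
{
	comp_bitmap |= 1U << (echo % RING_SIZE);

	while (comp_bitmap & (1U << (comp_idx % RING_SIZE))) {
		comp_bitmap &= ~(1U << (comp_idx % RING_SIZE));
		ring_consume(comp_idx % RING_SIZE);
		comp_idx++;
	}
}

int main(void)
{
	comp_bmap_update(2);	/* out of order: nothing is consumed yet */
	comp_bmap_update(0);	/* consumes slot 0 only */
	comp_bmap_update(1);	/* consumes slots 1 and 2 */
	return 0;
}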
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9b08a9d9e151..ca6290fa0f30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
default:
DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
p_hwfn->hw_info.personality);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 9647578cbe6a..14f26bf3b388 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
struct qlcnic_host_tx_ring *tx_ring)
{
- u8 l4proto, opcode = 0, hdr_len = 0;
+ u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ tag_vlan = 1;
} else if (skb_vlan_tag_present(skb)) {
flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = skb_vlan_tag_get(skb);
+ tag_vlan = 1;
}
if (unlikely(adapter->tx_pvid)) {
- if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
return -EIO;
- if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
flags = QLCNIC_FLAGS_VLAN_OOB;
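The tag_vlan flag introduced above exists because a VLAN TCI of 0 is legitimate (a priority-tagged frame), so testing vlan_tci for non-zero is not a reliable "tag present" check. A standalone illustration of the distinction, using a hypothetical struct rather than the qlcnic types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct parsed_tag {
	bool	 present;	/* a tag header (or offloaded tag) was seen */
	uint16_t tci;		/* may legitimately be 0: priority-tagged frame */
};

/* Deciding "tagged?" from the TCI value alone misclassifies TCI == 0. */
static bool tagged_by_value(const struct parsed_tag *t)
{
	return t->tci != 0;
}

static bool tagged_by_flag(const struct parsed_tag *t)
{
	return t->present;
}

int main(void)
{
	struct parsed_tag prio_tag = { .present = true, .tci = 0 };

	printf("by value: %d, by flag: %d\n",
	       tagged_by_value(&prio_tag), tagged_by_flag(&prio_tag));
	return 0;
}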
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 0afc3d335d56..d11c16aeb19a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
- struct rmnet_priv *priv;
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
if (ep->egress_dev)
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
+ priv->real_dev = real_dev;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
- priv = netdev_priv(rmnet_dev);
priv->mux_id = id;
- priv->real_dev = real_dev;
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b1b305f8f414..272b9ca66314 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -365,7 +365,8 @@ struct dma_features {
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
+/* RX Buffer size must be < 8191 and a multiple of 4/8/16 bytes */
+#define BUF_SIZE_8KiB 8188
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048
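A quick check of the arithmetic behind the new BUF_SIZE_8KiB value, under the constraint stated in the comment above (size below 8191 bytes and a multiple of the 4/8/16-byte bus width); the 4-byte case gives the 8188 chosen here:

#include <stdio.h>

/* Largest buffer size strictly below 'limit' that is a multiple of 'align'. */
static unsigned int max_buf_size(unsigned int limit, unsigned int align)
{
	return ((limit - 1) / align) * align;
}

int main(void)
{
	printf("%u %u %u\n",
	       max_buf_size(8191, 4),	/* 8188 - matches BUF_SIZE_8KiB */
	       max_buf_size(8191, 8),	/* 8184 */
	       max_buf_size(8191, 16));	/* 8176 */
	return 0;
}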
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index ca9d7e48034c..40d6356a7e73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -31,7 +31,7 @@
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
<< ERDES1_BUFFER2_SIZE_SHIFT)
& ERDES1_BUFFER2_SIZE_MASK);
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 77914c89d749..5ef91a790f9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+ p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index abc3f85270cd..d8c5bc412219 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
static int set_16kib_bfsize(int mtu)
{
int ret = 0;
- if (unlikely(mtu >= BUF_SIZE_8KiB))
+ if (unlikely(mtu > BUF_SIZE_8KiB))
ret = BUF_SIZE_16KiB;
return ret;
}
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 3b7f10a5f06a..c5cae8e74dc4 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@@ -56,7 +56,7 @@
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"
-static char version[] =
+static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
@@ -784,7 +784,7 @@ err_rx:
static void fza_tx_smt(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
- struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+ struct fza_buffer_tx __iomem *smt_tx_ptr;
int i, len;
u32 own;
@@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev)
if (!netif_queue_stopped(dev)) {
if (dev_nit_active(dev)) {
+ struct fza_buffer_tx *skb_data_ptr;
struct sk_buff *skb;
/* Length must be a multiple of 4 as only word
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h
index b06acf32738e..93bda61be8e3 100644
--- a/drivers/net/fddi/defza.h
+++ b/drivers/net/fddi/defza.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@@ -235,6 +235,7 @@ struct fza_ring_cmd {
#define FZA_RING_CMD 0x200400 /* command ring address */
#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
* size
+ */
/* Command constants. */
#define FZA_RING_CMD_MASK 0x7fffffff
#define FZA_RING_CMD_NOP 0x00000000 /* nop */
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index e86ea105c802..704537010453 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
return 0;
}
-static int bcm5481x_config(struct phy_device *phydev)
+static int bcm54xx_config_clock_delay(struct phy_device *phydev)
{
int rc, val;
@@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
- bcm5481x_config(phydev);
+ bcm54xx_config_clock_delay(phydev);
if (of_property_read_bool(np, "enet-phy-lane-swap")) {
/* Lane Swap - Undocumented register...magic! */
@@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Aneg first. */
+ ret = genphy_config_aneg(phydev);
+
+ /* Then we can set up the delay. */
+ bcm54xx_config_clock_delay(phydev);
+
+ return ret;
+}
+
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
+ .config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2d17f3b9bb16..f2d01cb6f958 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+ dev->net->min_mtu = ETH_MIN_MTU;
+ dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
pdata->dev = dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f172d63db2b5..91474b3c566c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->ndev)
nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
- if (ns->head->disk)
+ if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ }
#endif
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index b82b0d3ca39a..8b841f39734c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
+ blk_set_stacking_limits(&q->limits);
/* we need to propagate up the VMC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f4efe289dc7b..a5f9bbce863f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
- if (!ctrl->p2p_client)
+ if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
if (ns->p2p_dev) {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ddce100be57a..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
int inline_page_count;
};
-static struct workqueue_struct *nvmet_rdma_delete_wq;
static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_workqueue(nvmet_rdma_delete_wq);
+ flush_scheduled_work();
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
/* Destroying rdma_cm id is not needed here */
return 0;
}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
}
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
/**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
if (ret)
goto err_ib_client;
- nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
- if (!nvmet_rdma_delete_wq) {
- ret = -ENOMEM;
- goto err_unreg_transport;
- }
-
return 0;
-err_unreg_transport:
- nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
ib_unregister_client(&nvmet_rdma_ib_client);
return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- destroy_workqueue(nvmet_rdma_delete_wq);
nvmet_unregister_transport(&nvmet_rdma_ops);
ib_unregister_client(&nvmet_rdma_ib_client);
WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 0f27fad9fe94..5592437bb3d1 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* set by the driver.
*/
mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
- dev->bus_dma_mask = mask;
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
+ /* ...but only set bus mask if we found valid dma-ranges earlier */
+ if (!ret)
+ dev->bus_dma_mask = mask;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 35c64a4295e0..fe6b13608e51 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -104,9 +104,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
distance = of_read_number(matrix, 1);
matrix++;
+ if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
+ (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
+ pr_err("Invalid distance[node%d -> node%d] = %d\n",
+ nodea, nodeb, distance);
+ return -EINVAL;
+ }
+
numa_set_distance(nodea, nodeb, distance);
- pr_debug("distance[node%d -> node%d] = %d\n",
- nodea, nodeb, distance);
/* Set default distance of node B->A same as A->B */
if (nodeb > nodea)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 2a4aa6468579..921db6f80340 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -793,15 +793,10 @@ static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct acpi_device *adev = ACPI_COMPANION(dev);
- int node;
if (!adev)
return;
- node = acpi_get_node(adev->handle);
- if (node != NUMA_NO_NODE)
- set_dev_node(dev, node);
-
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_add_pm_notifier(adev, pci_dev);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 4ceb06f8a33c..4edeb4cae72a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
static struct meson_bank meson_gxbb_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 7dae1d7bf6b0..158f618f1695 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
static struct meson_bank meson_gxl_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index f8b778a7d471..53d449076dee 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -192,7 +192,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
- ret = regmap_update_bits(pc->reg_pull, reg,
+ ret = regmap_update_bits(pc->reg_pullen, reg,
BIT(bit), 0);
if (ret)
return ret;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index c6d79315218f..86466173114d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1053,7 +1053,7 @@ static struct meson_bank meson8_cbus_banks[] = {
static struct meson_bank meson8_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index bb2a30964fc6..647ad15d5c3c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -906,7 +906,7 @@ static struct meson_bank meson8b_cbus_banks[] = {
static struct meson_bank meson8b_aobus_banks[] = {
/* name first lastc irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e79f2a181ad2..b9ec4a16db1f 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
tv64.tv_sec = rtc_tm_to_time64(&tm);
#if BITS_PER_LONG == 32
- if (tv64.tv_sec > INT_MAX)
+ if (tv64.tv_sec > INT_MAX) {
+ err = -ERANGE;
goto err_read;
+ }
#endif
err = do_settimeofday64(&tv64);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index df0c5776d49b..a5a19ff10535 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -257,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char rtc_control;
+ /* This is not only an rtc_op, it is also called directly */
if (!is_valid_irq(cmos->irq))
return -EIO;
@@ -452,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
unsigned char mon, mday, hrs, min, sec, rtc_control;
int ret;
+ /* This is not only an rtc_op, it is also called directly */
if (!is_valid_irq(cmos->irq))
return -EIO;
@@ -516,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned long flags;
- if (!is_valid_irq(cmos->irq))
- return -EINVAL;
-
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
@@ -579,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = {
.alarm_irq_enable = cmos_alarm_irq_enable,
};
+static const struct rtc_class_ops cmos_rtc_ops_no_alarm = {
+ .read_time = cmos_read_time,
+ .set_time = cmos_set_time,
+ .proc = cmos_procfs,
+};
+
/*----------------------------------------------------------------*/
/*
@@ -855,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
goto cleanup1;
}
+
+ cmos_rtc.rtc->ops = &cmos_rtc_ops;
+ } else {
+ cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
}
- cmos_rtc.rtc->ops = &cmos_rtc_ops;
cmos_rtc.rtc->nvram_old_abi = true;
retval = rtc_register_device(cmos_rtc.rtc);
if (retval)
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9f99a0966550..7cb786d76e3c 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context,
memcpy(buf + 1, val, val_size);
ret = i2c_master_send(client, buf, val_size + 1);
+
+ kfree(buf);
+
if (ret != val_size + 1)
return ret < 0 ? ret : -EIO;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index dfdc6940de2f..f38882f6f37d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -566,6 +566,7 @@ config SCSI_MYRB
config SCSI_MYRS
tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
depends on PCI
+ depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select RAID_ATTRS
help
This driver adds support for the Mylex DAC960, AcceleRAID, and
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 8429c855701f..01c23d27f290 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -1198,7 +1198,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
out:
if (!hostdata->selecting)
- return NULL;
+ return false;
hostdata->selecting = NULL;
return ret;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index f0e457e6884e..8df822a4a1bd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -904,11 +904,9 @@ static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct hisi_sas_slot *s, *s1, *s2 = NULL;
- struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp;
- dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index cc36b6473e98..77a85ead483e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1670,11 +1670,9 @@ static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct hisi_sas_slot *s, *s1, *s2 = NULL;
- struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp;
- dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index bd4ce38b98d2..a369450a1fa7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -886,11 +886,9 @@ static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct hisi_sas_slot *s, *s1, *s2 = NULL;
- struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp;
- dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 0c8005bb0f53..34d311a7dbef 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -698,6 +698,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
+ else
+ nrport = NULL;
spin_unlock(&phba->hbalock);
if (!nrport)
continue;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index aeb282f617c5..0642f2d0a3bb 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1049,7 +1049,8 @@ static int myrb_get_hba_config(struct myrb_hba *cb)
enquiry2->fw.firmware_type = '0';
enquiry2->fw.turn_id = 0;
}
- sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+ snprintf(cb->fw_version, sizeof(cb->fw_version),
+ "%d.%02d-%c-%02d",
enquiry2->fw.major_version,
enquiry2->fw.minor_version,
enquiry2->fw.firmware_type,
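The switch from sprintf() to snprintf() above is the usual bounded-formatting fix: the output can never run past the fixed-size fw_version buffer and is always NUL-terminated. A tiny standalone sketch with a hypothetical struct, not the myrb types:

#include <stdio.h>

struct ctlr {
	char fw_version[12];		/* fixed-size destination */
};

int main(void)
{
	struct ctlr cb;

	/* snprintf() writes at most sizeof(cb.fw_version) bytes and always
	 * NUL-terminates; sprintf() would overflow on oversized input.
	 */
	snprintf(cb.fw_version, sizeof(cb.fw_version), "%d.%02d-%c-%02d",
		 5, 6, 'D', 2);
	puts(cb.fw_version);
	return 0;
}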
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 0264a2e2bc19..b8d54ef8cf6d 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -163,9 +163,12 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
dma_addr_t ctlr_info_addr;
union myrs_sgl *sgl;
unsigned char status;
- struct myrs_ctlr_info old;
+ unsigned short ldev_present, ldev_critical, ldev_offline;
+
+ ldev_present = cs->ctlr_info->ldev_present;
+ ldev_critical = cs->ctlr_info->ldev_critical;
+ ldev_offline = cs->ctlr_info->ldev_offline;
- memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
sizeof(struct myrs_ctlr_info),
DMA_FROM_DEVICE);
@@ -198,9 +201,9 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
cs->ctlr_info->rbld_active +
cs->ctlr_info->exp_active != 0)
cs->needs_update = true;
- if (cs->ctlr_info->ldev_present != old.ldev_present ||
- cs->ctlr_info->ldev_critical != old.ldev_critical ||
- cs->ctlr_info->ldev_offline != old.ldev_offline)
+ if (cs->ctlr_info->ldev_present != ldev_present ||
+ cs->ctlr_info->ldev_critical != ldev_critical ||
+ cs->ctlr_info->ldev_offline != ldev_offline)
shost_printk(KERN_INFO, cs->host,
"Logical drive count changes (%d/%d/%d)\n",
cs->ctlr_info->ldev_critical,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 6fe20c27acc1..eb59c796a795 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4763,6 +4763,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
+ fcport->fp_speed = PORT_SPEED_UNKNOWN;
fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fdf3e52ee908..2c9efcc71a41 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -67,7 +67,7 @@ module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
"Option to enable PLOGI to devices that are not present after "
"a Fabric scan. This is needed for several broken switches. "
- "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
+ "Default is 0 - no PLOGI. 1 - perform PLOGI.");
int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5d83a162d03b..0df15cb738d2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -610,6 +610,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
*/
scsi_mq_uninit_cmd(cmd);
+ /*
+ * The queue is still alive, so grab a reference to prevent it
+ * from being cleaned up while the queue is being run.
+ */
+ percpu_ref_get(&q->q_usage_counter);
+
__blk_mq_end_request(req, error);
if (scsi_target(sdev)->single_lun ||
@@ -618,6 +624,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
else
blk_mq_run_hw_queues(q, true);
+ percpu_ref_put(&q->q_usage_counter);
put_device(&sdev->sdev_gendev);
return false;
}
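The percpu_ref_get()/percpu_ref_put() pair added above follows a common lifetime pattern: pin an object with a temporary reference before an operation that may drop the reference currently keeping it alive, and release the pin only after the last use. A minimal plain-C refcount sketch of that ordering (not the block-layer API):

#include <stdio.h>
#include <stdlib.h>

struct queue {
	int refs;
};

static void queue_get(struct queue *q) { q->refs++; }

static void queue_put(struct queue *q)
{
	if (--q->refs == 0) {
		printf("queue freed\n");
		free(q);
	}
}

static void run_queue(struct queue *q)
{
	printf("running queue, refs=%d\n", q->refs);
}

/* Completing the request may drop the reference that was keeping the queue
 * alive, so pin the queue with our own reference before completing and
 * release the pin only after the final run_queue() call.
 */
static void complete_and_run(struct queue *q)
{
	queue_get(q);		/* our temporary pin */
	queue_put(q);		/* stands in for "complete request, drop its ref" */
	run_queue(q);		/* safe: our pin is still held */
	queue_put(q);		/* queue may be freed here, not earlier */
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	q->refs = 1;		/* the request's reference */
	complete_and_run(q);
	return 0;
}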
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index fb308ea8e9a5..27db55b0ca7f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8099,7 +8099,6 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
-
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index e31e4fc31aa1..2cfd61d62e97 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
- int ret = 0;
+ int ret = 0, post_ret;
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
@@ -1790,7 +1790,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
transport_complete_task_attr(cmd);
if (cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd, false, NULL);
+ cmd->transport_complete_callback(cmd, false, &post_ret);
if (transport_check_aborted_status(cmd, 1))
return;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ff6ba6d86cd8..cc56cb3b3eca 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port)
hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->rx_timer.function = rx_timer_fn;
+ s->chan_rx_saved = s->chan_rx = chan;
+
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
-
- s->chan_rx_saved = s->chan_rx = chan;
}
}
@@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = {
static int sci_remove(struct platform_device *dev)
{
struct sci_port *port = platform_get_drvdata(dev);
+ unsigned int type = port->port.type; /* uart_remove_... clears it */
sci_ports_in_use &= ~BIT(port->port.line);
uart_remove_one_port(&sci_uart_driver, &port->port);
@@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev)
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_trigger.attr);
}
- if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
- port->port.type == PORT_HSCIF) {
+ if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
}
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 7576ceace571..f438eaa68246 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
EXPORT_SYMBOL(tty_termios_baud_rate);
@@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
#else /* IBSHIFT */
return tty_termios_baud_rate(termios);
#endif /* IBSHIFT */
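The guard added in both hunks above is a plain bounds-checked table lookup: validate the decoded index against the table length before dereferencing, and fall back to a safe value (B0, i.e. 0) otherwise. A small standalone sketch with an abbreviated, hypothetical table:

#include <stddef.h>
#include <stdio.h>

static const unsigned int baud_table[] = { 0, 50, 75, 110, 134, 150, 200, 300 };
static const size_t n_baud_table = sizeof(baud_table) / sizeof(baud_table[0]);

/* Return 0 (B0, "hang up") instead of reading past the table when the
 * decoded index is larger than the table actually is.
 */
static unsigned int lookup_baud(size_t cbaud)
{
	return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}

int main(void)
{
	printf("%u %u\n", lookup_baud(3), lookup_baud(42));
	return 0;
}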
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 55370e651db3..41ec8e5010f3 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1548,7 +1548,7 @@ static void csi_K(struct vc_data *vc, int vpar)
scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
- do_update_region(vc, (unsigned long) start, count);
+ do_update_region(vc, (unsigned long)(start + offset), count);
}
static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index e36d6c73c4a4..78118883f96c 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -23,6 +23,16 @@ config TYPEC_UCSI
if TYPEC_UCSI
+config UCSI_CCG
+ tristate "UCSI Interface Driver for Cypress CCGx"
+ depends on I2C
+ help
+ This driver enables UCSI support on platforms that expose a
+ Cypress CCGx Type-C controller over an I2C interface.
+
+ To compile the driver as a module, choose M here: the module will be
+ called ucsi_ccg.
+
config UCSI_ACPI
tristate "UCSI ACPI Interface Driver"
depends on ACPI
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 7afbea512207..2f4900b26210 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -8,3 +8,5 @@ typec_ucsi-y := ucsi.o
typec_ucsi-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
+
+obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
new file mode 100644
index 000000000000..de8a43bdff68
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for Cypress CCGx Type-C controller
+ *
+ * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ *
+ * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/unaligned.h>
+#include "ucsi.h"
+
+struct ucsi_ccg {
+ struct device *dev;
+ struct ucsi *ucsi;
+ struct ucsi_ppm ppm;
+ struct i2c_client *client;
+};
+
+#define CCGX_RAB_INTR_REG 0x06
+#define CCGX_RAB_UCSI_CONTROL 0x39
+#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
+#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
+#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
+
+static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
+ unsigned char buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ .len = sizeof(buf),
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = data,
+ },
+ };
+ u32 rlen, rem_len = len, max_read_len = len;
+ int status;
+
+ /* check any max_read_len limitation on i2c adapter */
+ if (quirks && quirks->max_read_len)
+ max_read_len = quirks->max_read_len;
+
+ while (rem_len > 0) {
+ msgs[1].buf = &data[len - rem_len];
+ rlen = min_t(u16, rem_len, max_read_len);
+ msgs[1].len = rlen;
+ put_unaligned_le16(rab, buf);
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ return status;
+ }
+ rab += rlen;
+ rem_len -= rlen;
+ }
+
+ return 0;
+}
+
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ unsigned char *buf;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ }
+ };
+ int status;
+
+ buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ put_unaligned_le16(rab, buf);
+ memcpy(buf + sizeof(rab), data, len);
+
+ msgs[0].len = len + sizeof(rab);
+ msgs[0].buf = buf;
+
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ kfree(buf);
+ return status;
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+static int ucsi_ccg_init(struct ucsi_ccg *uc)
+{
+ unsigned int count = 10;
+ u8 data;
+ int status;
+
+ data = CCGX_RAB_UCSI_CONTROL_STOP;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ data = CCGX_RAB_UCSI_CONTROL_START;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ /*
+ * Flush the CCGx RESPONSE queue by acking interrupts. The above UCSI
+ * control register write will push a response which must be cleared.
+ */
+ do {
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ if (!data)
+ return 0;
+
+ status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ usleep_range(10000, 11000);
+ } while (--count);
+
+ return -ETIMEDOUT;
+}
+
+static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
+ status = ccg_write(uc, rab, ppm +
+ offsetof(struct ucsi_data, message_out),
+ sizeof(uc->ppm.data->message_out));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
+ return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
+ sizeof(uc->ppm.data->ctrl));
+}
+
+static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
+ status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
+ sizeof(uc->ppm.data->cci));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
+ return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
+ sizeof(uc->ppm.data->message_in));
+}
+
+static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+{
+ int status;
+ unsigned char data;
+
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+}
+
+static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+ int status;
+
+ status = ucsi_ccg_recv_data(uc);
+ if (status < 0)
+ return status;
+
+ /* ack interrupt to allow next command to run */
+ return ucsi_ccg_ack_interrupt(uc);
+}
+
+static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+
+ ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ return ucsi_ccg_send_data(uc);
+}
+
+static irqreturn_t ccg_irq_handler(int irq, void *data)
+{
+ struct ucsi_ccg *uc = data;
+
+ ucsi_notify(uc->ucsi);
+
+ return IRQ_HANDLED;
+}
+
+static int ucsi_ccg_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ucsi_ccg *uc;
+ int status;
+ u16 rab;
+
+ uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
+ if (!uc)
+ return -ENOMEM;
+
+ uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
+ if (!uc->ppm.data)
+ return -ENOMEM;
+
+ uc->ppm.cmd = ucsi_ccg_cmd;
+ uc->ppm.sync = ucsi_ccg_sync;
+ uc->dev = dev;
+ uc->client = client;
+
+ /* reset ccg device and initialize ucsi */
+ status = ucsi_ccg_init(uc);
+ if (status < 0) {
+ dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
+ return status;
+ }
+
+ status = devm_request_threaded_irq(dev, client->irq, NULL,
+ ccg_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), uc);
+ if (status < 0) {
+ dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
+ return status;
+ }
+
+ uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
+ if (IS_ERR(uc->ucsi)) {
+ dev_err(uc->dev, "ucsi_register_ppm failed\n");
+ return PTR_ERR(uc->ucsi);
+ }
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
+ status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
+ offsetof(struct ucsi_data, version),
+ sizeof(uc->ppm.data->version));
+ if (status < 0) {
+ ucsi_unregister_ppm(uc->ucsi);
+ return status;
+ }
+
+ i2c_set_clientdata(client, uc);
+ return 0;
+}
+
+static int ucsi_ccg_remove(struct i2c_client *client)
+{
+ struct ucsi_ccg *uc = i2c_get_clientdata(client);
+
+ ucsi_unregister_ppm(uc->ucsi);
+
+ return 0;
+}
+
+static const struct i2c_device_id ucsi_ccg_device_id[] = {
+ {"ccgx-ucsi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
+
+static struct i2c_driver ucsi_ccg_driver = {
+ .driver = {
+ .name = "ucsi_ccg",
+ },
+ .probe = ucsi_ccg_probe,
+ .remove = ucsi_ccg_remove,
+ .id_table = ucsi_ccg_device_id,
+};
+
+module_i2c_driver(ucsi_ccg_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
+MODULE_LICENSE("GPL v2");
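ccg_read() in the new driver splits a large UCSI data-block read into transfers no bigger than the adapter's max_read_len quirk, advancing the register address as it goes. A plain-C sketch of that chunking loop, with a fake read_chunk() standing in for the address-write-plus-read i2c_transfer() pair:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for one bounded register-block read; a real implementation would
 * write the register address and then read 'len' bytes from the device.
 */
static int read_chunk(uint16_t reg, uint8_t *dst, size_t len)
{
	memset(dst, (int)(reg & 0xff), len);	/* fake data for the demo */
	return 0;
}

/* Split a large read into chunks no bigger than the adapter's limit,
 * advancing the register address by the amount already read.
 */
static int read_block(uint16_t reg, uint8_t *data, size_t len, size_t max_read_len)
{
	size_t done = 0;

	while (done < len) {
		size_t chunk = len - done;

		if (chunk > max_read_len)
			chunk = max_read_len;
		if (read_chunk(reg, data + done, chunk))
			return -1;
		reg  += chunk;
		done += chunk;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[10];

	if (read_block(0xf000, buf, sizeof(buf), 4))	/* 4 + 4 + 2 byte reads */
		return 1;
	printf("read %zu bytes, first byte 0x%02x\n", sizeof(buf), buf[0]);
	return 0;
}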
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f15f89df1f36..7ea6fb6a2e5d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
ret = xenmem_reservation_increase(args->nr_pages, args->frames);
if (ret != args->nr_pages) {
- pr_debug("Failed to decrease reservation for DMA buffer\n");
+ pr_debug("Failed to increase reservation for DMA buffer\n");
ret = -EFAULT;
} else {
ret = 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index df1ed37c3269..de01a6d0059d 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -21,15 +21,9 @@
MODULE_LICENSE("GPL");
-static unsigned int limit = 64;
-module_param(limit, uint, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
- "the privcmd-buf device per open file");
-
struct privcmd_buf_private {
struct mutex lock;
struct list_head list;
- unsigned int allocated;
};
struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
{
unsigned int i;
- vma_priv->file_priv->allocated -= vma_priv->n_pages;
-
list_del(&vma_priv->list);
for (i = 0; i < vma_priv->n_pages; i++)
- if (vma_priv->pages[i])
- __free_page(vma_priv->pages[i]);
+ __free_page(vma_priv->pages[i]);
kfree(vma_priv);
}
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned int i;
int ret = 0;
- if (!(vma->vm_flags & VM_SHARED) || count > limit ||
- file_priv->allocated + count > limit)
+ if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (!vma_priv)
return -ENOMEM;
- vma_priv->n_pages = count;
- count = 0;
- for (i = 0; i < vma_priv->n_pages; i++) {
+ for (i = 0; i < count; i++) {
vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!vma_priv->pages[i])
break;
- count++;
+ vma_priv->n_pages++;
}
mutex_lock(&file_priv->lock);
- file_priv->allocated += count;
-
vma_priv->file_priv = file_priv;
vma_priv->users = 1;