author     Linus Torvalds <torvalds@linux-foundation.org>   2024-11-21 14:56:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-11-21 14:56:17 -0800
commit     28eb75e178d389d325f1666e422bc13bbbb9804c (patch)
tree       20417b4e798f98fc5687e80c1e0126afcf437c70 /drivers/gpu/drm/xlnx
parent     071b34dcf71523a559b6c39f5d21a268a9531b50 (diff)
parent     a163b895077861598be48c1cf7f4a88413c28b22 (diff)
Merge tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel
Pull drm updates from Dave Airlie:
"There's a lot of rework, the panic helper support is being added to
more drivers, v3d gets support for HW superpages, scheduler
documentation, drm client and video aperture reworks, some new
MAINTAINERS added, amdgpu has the usual lots of IP refactors, Intel
has some Pantherlake enablement and xe is getting some SRIOV bits, but
just lots of stuff everywhere.
core:
- split DSC helpers from DP helpers
- clang build fixes for drm/mm test
- drop simple pipeline support for gem vram
- document submission error signaling
- move drm_rect to drm core module from kms helper
- add default client setup to most drivers
- move to video aperture helpers instead of drm ones
tests:
- new framebuffer tests
ttm:
- remove swapped and pinned BOs from TTM lru
panic:
- fix uninit spinlock
- add ABGR2101010 support
bridge:
- add TI TDP158 support
- use standard PM OPS
dma-fence:
- use read_trylock instead of read_lock to help lockdep
scheduler:
- add errno to sched start to report different errors
- add locking to drm_sched_entity_modify_sched
- improve documentation
xe:
- add drm_line_printer
- lots of refactoring
- Enable Xe2 + PES disaggregation
- add new ARL PCI ID
- SRIOV development work
- fix exec unnecessary implicit fence
- define and parse OA sync props
- forcewake refactoring
i915:
- Enable BMG/LNL ultra joiner
- Enable 10bpc + CCS scanout on ICL+, fp16/CCS on TGL+
- use DSB for plane/color mgmt
- Arrow lake PCI IDs
- lots of i915/xe display refactoring
- enable PXP GuC autoteardown
- Pantherlake (PTL) Xe3 LPD display enablement
- Allow fastset HDR infoframe changes
- write DP source OUI for non-eDP sinks
- share PCI IDs between i915 and xe
amdgpu:
- SDMA queue reset support
- SMU 13.0.6, JPEG 4.0.3 updates
- Initial runtime repartitioning support
- rework IP structs for multiple IP instances
- Fetch EDID from _DDC if available
- SMU13 zero rpm user control
- lots of fixes/cleanups
amdkfd:
- Increase event FIFO size
- add topology cap flag for per queue reset
msm:
- DPU:
- SA8775P support
- (disabled by default) MSM8917, MSM8937, MSM8953 and MSM8996 support
- Enable large framebuffer support
- Drop MSM8998 and SDM845
- DP:
- SA8775P support
- GPU:
- a7xx preemption support
- Adreno A663 support
ast:
- warn about unsupported TX chips
ivpu:
- add coredump
- add pantherlake support
rockchip:
- 4K@60Hz display enablement
- generate pll programming tables
panthor:
- add timestamp query API
- add realtime group priority
- add fdinfo support
etnaviv:
- improve handling of DMA address limits
- improve GPU hangcheck
exynos:
- Decon Exynos7870 support
mediatek:
- add OF graph support
omap:
- locking fixes
bochs:
- convert to gem/shmem from simpledrm
v3d:
- support big/super pages
- add gemfs
vc4:
- BCM2712 support refactoring
- add YUV444 format support
udmabuf:
- folio related fixes
nouveau:
- add panic support on nv50+"
* tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel: (1583 commits)
drm/xe/guc: Fix dereference before NULL check
drm/amd: Fix initialization mistake for NBIO 7.7.0
Revert "drm/amd/display: parse umc_info or vram_info based on ASIC"
drm/amd/display: Fix failure to read vram info due to static BP_RESULT
drm/amdgpu: enable GTT fallback handling for dGPUs only
drm/amd/amdgpu: limit single process inside MES
drm/fourcc: add AMD_FMT_MOD_TILE_GFX9_4K_D_X
drm/amdgpu/mes12: correct kiq unmap latency
drm/amdgpu: Support vcn and jpeg error info parsing
drm/amd : Update MES API header file for v11 & v12
drm/amd/amdkfd: add/remove kfd queues on start/stop KFD scheduling
drm/amdkfd: change kfd process kref count at creation
drm/amdgpu: Cleanup shift coding style
drm/amd/amdgpu: Increase MES log buffer to dump mes scratch data
drm/amdgpu: Implement virt req_ras_err_count
drm/amdgpu: VF Query RAS Caps from Host if supported
drm/amdgpu: Add msg handlers for SRIOV RAS Telemetry
drm/amdgpu: Update SRIOV Exchange Headers for RAS Telemetry Support
drm/amd/display: 3.2.309
drm/amd/display: Adjust VSDB parser for replay feature
...
Diffstat (limited to 'drivers/gpu/drm/xlnx')
-rw-r--r--   drivers/gpu/drm/xlnx/Kconfig        |   1
-rw-r--r--   drivers/gpu/drm/xlnx/zynqmp_disp.c  |   3
-rw-r--r--   drivers/gpu/drm/xlnx/zynqmp_dp.c    | 843
-rw-r--r--   drivers/gpu/drm/xlnx/zynqmp_kms.c   |  10
4 files changed, 806 insertions, 51 deletions
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig index 626e5ac4c33d..4197f44e202f 100644 --- a/drivers/gpu/drm/xlnx/Kconfig +++ b/drivers/gpu/drm/xlnx/Kconfig @@ -6,6 +6,7 @@ config DRM_ZYNQMP_DPSUB depends on PHY_XILINX_ZYNQMP depends on XILINX_ZYNQMP_DPDMA select DMA_ENGINE + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 9368acf56eaf..e4e0e299e8a7 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1200,6 +1200,9 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp, { unsigned int i; + if (!layer->info) + return; + for (i = 0; i < layer->info->num_channels; i++) { struct zynqmp_disp_layer_dma *dma = &layer->dmas[i]; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 129beac4c073..25c5dc61ee88 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -18,7 +18,9 @@ #include <drm/drm_modes.h> #include <drm/drm_of.h> +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/io.h> @@ -51,6 +53,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_LANE_COUNT_SET 0x4 #define ZYNQMP_DP_ENHANCED_FRAME_EN 0x8 #define ZYNQMP_DP_TRAINING_PATTERN_SET 0xc +#define ZYNQMP_DP_LINK_QUAL_PATTERN_SET 0x10 #define ZYNQMP_DP_SCRAMBLING_DISABLE 0x14 #define ZYNQMP_DP_DOWNSPREAD_CTL 0x18 #define ZYNQMP_DP_SOFTWARE_RESET 0x1c @@ -64,6 +67,9 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); ZYNQMP_DP_SOFTWARE_RESET_STREAM3 | \ ZYNQMP_DP_SOFTWARE_RESET_STREAM4 | \ ZYNQMP_DP_SOFTWARE_RESET_AUX) +#define ZYNQMP_DP_COMP_PATTERN_80BIT_1 0x20 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_2 0x24 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_3 0x28 /* Core enable registers */ #define ZYNQMP_DP_TRANSMITTER_ENABLE 0x80 @@ -207,6 +213,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf +#define ZYNQMP_DP_TRANSMIT_PRBS7 0x230 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_0 0x23c #define ZYNQMP_DP_PHY_PRECURSOR_LANE_1 0x240 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_2 0x244 @@ -275,30 +282,108 @@ struct zynqmp_dp_config { }; /** + * enum test_pattern - Test patterns for test testing + * @TEST_VIDEO: Use regular video input + * @TEST_SYMBOL_ERROR: Symbol error measurement pattern + * @TEST_PRBS7: Output of the PRBS7 (x^7 + x^6 + 1) polynomial + * @TEST_80BIT_CUSTOM: A custom 80-bit pattern + * @TEST_CP2520: HBR2 compliance eye pattern + * @TEST_TPS1: Link training symbol pattern TPS1 (/D10.2/) + * @TEST_TPS2: Link training symbol pattern TPS2 + * @TEST_TPS3: Link training symbol pattern TPS3 (for HBR2) + */ +enum test_pattern { + TEST_VIDEO, + TEST_TPS1, + TEST_TPS2, + TEST_TPS3, + TEST_SYMBOL_ERROR, + TEST_PRBS7, + TEST_80BIT_CUSTOM, + TEST_CP2520, +}; + +static const char *const test_pattern_str[] = { + [TEST_VIDEO] = "video", + [TEST_TPS1] = "tps1", + [TEST_TPS2] = "tps2", + [TEST_TPS3] = "tps3", + [TEST_SYMBOL_ERROR] = "symbol-error", + [TEST_PRBS7] = "prbs7", + [TEST_80BIT_CUSTOM] = "80bit-custom", + [TEST_CP2520] = "cp2520", +}; + +/** + * struct zynqmp_dp_test - Configuration for test mode + * @pattern: The test 
pattern + * @enhanced: Use enhanced framing + * @downspread: Use SSC + * @active: Whether test mode is active + * @custom: Custom pattern for %TEST_80BIT_CUSTOM + * @train_set: Voltage/preemphasis settings + * @bw_code: Bandwidth code for the link + * @link_cnt: Number of lanes + */ +struct zynqmp_dp_test { + enum test_pattern pattern; + bool enhanced, downspread, active; + u8 custom[10]; + u8 train_set[ZYNQMP_DP_MAX_LANES]; + u8 bw_code; + u8 link_cnt; +}; + +/** + * struct zynqmp_dp_train_set_priv - Private data for train_set debugfs files + * @dp: DisplayPort IP core structure + * @lane: The lane for this file + */ +struct zynqmp_dp_train_set_priv { + struct zynqmp_dp *dp; + int lane; +}; + +/** * struct zynqmp_dp - Xilinx DisplayPort core * @dev: device structure * @dpsub: Display subsystem * @iomem: device I/O memory for register access * @reset: reset controller + * @lock: Mutex protecting this struct and register access (but not AUX) * @irq: irq * @bridge: DRM bridge for the DP encoder * @next_bridge: The downstream bridge + * @test: Configuration for test mode * @config: IP core configuration from DTS * @aux: aux channel + * @aux_done: Completed when we get an AUX reply or timeout + * @ignore_aux_errors: If set, AUX errors are suppressed * @phy: PHY handles for DP lanes * @num_lanes: number of enabled phy lanes * @hpd_work: hot plug detection worker + * @hpd_irq_work: hot plug detection IRQ worker + * @ignore_hpd: If set, HPD events and IRQs are ignored * @status: connection status * @enabled: flag to indicate if the device is enabled * @dpcd: DP configuration data from currently connected sink device * @link_config: common link configuration between IP core and sink device * @mode: current mode between IP core and sink device * @train_set: set of training data + * @debugfs_train_set: Debugfs private data for @train_set + * + * @lock covers the link configuration in this struct and the device's + * registers. It does not cover @aux or @ignore_aux_errors. It is not strictly + * required for any of the members which are only modified at probe/remove time + * (e.g. @dev). */ struct zynqmp_dp { struct drm_dp_aux aux; struct drm_bridge bridge; struct work_struct hpd_work; + struct work_struct hpd_irq_work; + struct completion aux_done; + struct mutex lock; struct drm_bridge *next_bridge; struct device *dev; @@ -310,9 +395,13 @@ struct zynqmp_dp { enum drm_connector_status status; int irq; bool enabled; + bool ignore_aux_errors; + bool ignore_hpd; + struct zynqmp_dp_train_set_priv debugfs_train_set[ZYNQMP_DP_MAX_LANES]; struct zynqmp_dp_mode mode; struct zynqmp_dp_link_config link_config; + struct zynqmp_dp_test test; struct zynqmp_dp_config config; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 train_set[ZYNQMP_DP_MAX_LANES]; @@ -626,6 +715,7 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, /** * zynqmp_dp_update_vs_emph - Update the training values * @dp: DisplayPort IP core structure + * @train_set: A set of training values * * Update the training values based on the request from sink. The mapped values * are predefined, and values(vs, pe, pc) are from the device manual. @@ -633,12 +723,12 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, * Return: 0 if vs and emph are updated successfully, or the error code returned * by drm_dp_dpcd_write(). 
*/ -static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) +static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp, u8 *train_set) { unsigned int i; int ret; - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->train_set, + ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->mode.lane_cnt); if (ret < 0) return ret; @@ -646,7 +736,7 @@ static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) for (i = 0; i < dp->mode.lane_cnt; i++) { u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4; union phy_configure_opts opts = { 0 }; - u8 train = dp->train_set[i]; + u8 train = train_set[i]; opts.dp.voltage[0] = (train & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; @@ -690,7 +780,7 @@ static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp) * So, This loop should exit before 512 iterations */ for (max_tries = 0; max_tries < 512; max_tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -755,7 +845,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) return ret; for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -778,28 +868,29 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) } /** - * zynqmp_dp_train - Train the link + * zynqmp_dp_setup() - Set up major link parameters * @dp: DisplayPort IP core structure + * @bw_code: The link bandwidth as a multiple of 270 MHz + * @lane_cnt: The number of lanes to use + * @enhanced: Use enhanced framing + * @downspread: Enable spread-spectrum clocking * - * Return: 0 if all trains are done successfully, or corresponding error code. + * Return: 0 on success, or -errno on failure */ -static int zynqmp_dp_train(struct zynqmp_dp *dp) +static int zynqmp_dp_setup(struct zynqmp_dp *dp, u8 bw_code, u8 lane_cnt, + bool enhanced, bool downspread) { u32 reg; - u8 bw_code = dp->mode.bw_code; - u8 lane_cnt = dp->mode.lane_cnt; u8 aux_lane_cnt = lane_cnt; - bool enhanced; int ret; zynqmp_dp_write(dp, ZYNQMP_DP_LANE_COUNT_SET, lane_cnt); - enhanced = drm_dp_enhanced_frame_cap(dp->dpcd); if (enhanced) { zynqmp_dp_write(dp, ZYNQMP_DP_ENHANCED_FRAME_EN, 1); aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN; } - if (dp->dpcd[3] & 0x1) { + if (downspread) { zynqmp_dp_write(dp, ZYNQMP_DP_DOWNSPREAD_CTL, 1); drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); @@ -842,8 +933,24 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp) } zynqmp_dp_write(dp, ZYNQMP_DP_PHY_CLOCK_SELECT, reg); - ret = zynqmp_dp_phy_ready(dp); - if (ret < 0) + return zynqmp_dp_phy_ready(dp); +} + +/** + * zynqmp_dp_train - Train the link + * @dp: DisplayPort IP core structure + * + * Return: 0 if all trains are done successfully, or corresponding error code. + */ +static int zynqmp_dp_train(struct zynqmp_dp *dp) +{ + int ret; + + ret = zynqmp_dp_setup(dp, dp->mode.bw_code, dp->mode.lane_cnt, + drm_dp_enhanced_frame_cap(dp->dpcd), + dp->dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + if (ret) return ret; zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1); @@ -934,12 +1041,15 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, u8 *buf, u8 bytes, u8 *reply) { bool is_read = (cmd & AUX_READ_BIT) ? 
true : false; + unsigned long time_left; u32 reg, i; reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REQUEST) return -EBUSY; + reinit_completion(&dp->aux_done); + zynqmp_dp_write(dp, ZYNQMP_DP_AUX_ADDRESS, addr); if (!is_read) for (i = 0; i < bytes; i++) @@ -954,17 +1064,14 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, zynqmp_dp_write(dp, ZYNQMP_DP_AUX_COMMAND, reg); /* Wait for reply to be delivered upto 2ms */ - for (i = 0; ; i++) { - reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) - break; - - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT || - i == 2) - return -ETIMEDOUT; + time_left = wait_for_completion_timeout(&dp->aux_done, + msecs_to_jiffies(2)); + if (!time_left) + return -ETIMEDOUT; - usleep_range(1000, 1100); - } + reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); + if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + return -ETIMEDOUT; reg = zynqmp_dp_read(dp, ZYNQMP_DP_AUX_REPLY_CODE); if (reply) @@ -1006,6 +1113,8 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (dp->status == connector_status_disconnected) { dev_dbg(dp->dev, "no connected aux device\n"); + if (dp->ignore_aux_errors) + goto fake_response; return -ENODEV; } @@ -1014,7 +1123,13 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret); - return ret; + if (!dp->ignore_aux_errors) + return ret; + +fake_response: + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + memset(msg->buffer, 0, msg->size); + return msg->size; } /** @@ -1048,6 +1163,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) (w << ZYNQMP_DP_AUX_CLK_DIVIDER_AUX_FILTER_SHIFT) | (rate / (1000 * 1000))); + zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); + dp->aux.name = "ZynqMP DP AUX"; dp->aux.dev = dp->dev; dp->aux.drm_dev = dp->bridge.dev; @@ -1065,6 +1183,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) static void zynqmp_dp_aux_cleanup(struct zynqmp_dp *dp) { drm_dp_aux_unregister(&dp->aux); + + zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); } /* ----------------------------------------------------------------------------- @@ -1386,8 +1507,10 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge, } /* Check with link rate and lane count */ + mutex_lock(&dp->lock); rate = zynqmp_dp_max_rate(dp->link_config.max_rate, dp->link_config.max_lanes, dp->config.bpp); + mutex_unlock(&dp->lock); if (mode->clock > rate) { dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n", mode->name); @@ -1414,6 +1537,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, pm_runtime_get_sync(dp->dev); + mutex_lock(&dp->lock); zynqmp_dp_disp_enable(dp, old_bridge_state); /* @@ -1474,6 +1598,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_SOFTWARE_RESET, ZYNQMP_DP_SOFTWARE_RESET_ALL); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1); + mutex_unlock(&dp->lock); } static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, @@ -1481,6 +1606,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct zynqmp_dp *dp = bridge_to_dp(bridge); + mutex_lock(&dp->lock); dp->enabled = false; cancel_work(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 
0); @@ -1491,6 +1617,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0); zynqmp_dp_disp_disable(dp, old_bridge_state); + mutex_unlock(&dp->lock); pm_runtime_put_sync(dp->dev); } @@ -1526,13 +1653,14 @@ static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge, return 0; } -static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp) { - struct zynqmp_dp *dp = bridge_to_dp(bridge); struct zynqmp_dp_link_config *link_config = &dp->link_config; u32 state, i; int ret; + lockdep_assert_held(&dp->lock); + /* * This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to * get the HPD signal with some monitors. @@ -1568,6 +1696,18 @@ disconnected: return connector_status_disconnected; } +static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + enum drm_connector_status ret; + + mutex_lock(&dp->lock); + ret = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + + return ret; +} + static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { @@ -1605,6 +1745,582 @@ zynqmp_dp_bridge_get_input_bus_fmts(struct drm_bridge *bridge, return zynqmp_dp_bridge_default_bus_fmts(num_input_fmts); } +/* ----------------------------------------------------------------------------- + * debugfs + */ + +/** + * zynqmp_dp_set_test_pattern() - Configure the link for a test pattern + * @dp: DisplayPort IP core structure + * @pattern: The test pattern to configure + * @custom: The custom pattern to use if @pattern is %TEST_80BIT_CUSTOM + * + * Return: 0 on success, or negative errno on (DPCD) failure + */ +static int zynqmp_dp_set_test_pattern(struct zynqmp_dp *dp, + enum test_pattern pattern, + u8 *const custom) +{ + bool scramble = false; + u32 train_pattern = 0; + u32 link_pattern = 0; + u8 dpcd_train = 0; + u8 dpcd_link = 0; + int ret; + + switch (pattern) { + case TEST_TPS1: + train_pattern = 1; + break; + case TEST_TPS2: + train_pattern = 2; + break; + case TEST_TPS3: + train_pattern = 3; + break; + case TEST_SYMBOL_ERROR: + scramble = true; + link_pattern = DP_PHY_TEST_PATTERN_ERROR_COUNT; + break; + case TEST_PRBS7: + /* We use a dedicated register to enable PRBS7 */ + dpcd_link = DP_LINK_QUAL_PATTERN_ERROR_RATE; + break; + case TEST_80BIT_CUSTOM: { + const u8 *p = custom; + + link_pattern = DP_LINK_QUAL_PATTERN_80BIT_CUSTOM; + + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_1, + (p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_2, + (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_3, + (p[9] << 8) | p[8]); + break; + } + case TEST_CP2520: + link_pattern = DP_LINK_QUAL_PATTERN_CP2520_PAT_1; + break; + default: + WARN_ON_ONCE(1); + fallthrough; + case TEST_VIDEO: + scramble = true; + } + + zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, !scramble); + zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET, train_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_LINK_QUAL_PATTERN_SET, link_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMIT_PRBS7, pattern == TEST_PRBS7); + + dpcd_link = dpcd_link ?: link_pattern; + dpcd_train = train_pattern; + if (!scramble) + dpcd_train |= DP_LINK_SCRAMBLING_DISABLE; + + if (dp->dpcd[DP_DPCD_REV] < 0x12) { + if (pattern == TEST_CP2520) + 
dev_warn(dp->dev, + "can't set sink link quality pattern to CP2520 for DPCD < r1.2; error counters will be invalid\n"); + else + dpcd_train |= FIELD_PREP(DP_LINK_QUAL_PATTERN_11_MASK, + dpcd_link); + } else { + u8 dpcd_link_lane[ZYNQMP_DP_MAX_LANES]; + + memset(dpcd_link_lane, dpcd_link, ZYNQMP_DP_MAX_LANES); + ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_QUAL_LANE0_SET, + dpcd_link_lane, ZYNQMP_DP_MAX_LANES); + if (ret < 0) + return ret; + } + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, dpcd_train); + return ret < 0 ? ret : 0; +} + +static int zynqmp_dp_test_setup(struct zynqmp_dp *dp) +{ + return zynqmp_dp_setup(dp, dp->test.bw_code, dp->test.link_cnt, + dp->test.enhanced, dp->test.downspread); +} + +static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = snprintf(buf, sizeof(buf), "%s\n", + test_pattern_str[dp->test.pattern]); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +static ssize_t zynqmp_dp_pattern_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + int pattern; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, + count); + if (ret < 0) + goto out; + buf[ret] = '\0'; + + pattern = sysfs_match_string(test_pattern_str, buf); + if (pattern < 0) { + ret = -EINVAL; + goto out; + } + + mutex_lock(&dp->lock); + dp->test.pattern = pattern; + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_pattern = { + .read = zynqmp_dp_pattern_read, + .write = zynqmp_dp_pattern_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_enhanced_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.enhanced; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_enhanced_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.enhanced = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_enhanced, zynqmp_dp_enhanced_get, + zynqmp_dp_enhanced_set, "%llu\n"); + +static int zynqmp_dp_downspread_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.downspread; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_downspread_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.downspread = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_downspread, zynqmp_dp_downspread_get, + zynqmp_dp_downspread_set, "%llu\n"); + +static int zynqmp_dp_active_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = 
dp->test.active; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_active_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + if (val) { + if (val < 2) { + ret = zynqmp_dp_test_setup(dp); + if (ret) + goto out; + } + + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom); + if (ret) + goto out; + + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + if (ret) + goto out; + + dp->test.active = true; + } else { + int err; + + dp->test.active = false; + err = zynqmp_dp_set_test_pattern(dp, TEST_VIDEO, NULL); + if (err) + dev_warn(dp->dev, "could not clear test pattern: %d\n", + err); + zynqmp_dp_train_loop(dp); + } +out: + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get, + zynqmp_dp_active_set, "%llu\n"); + +static ssize_t zynqmp_dp_custom_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = simple_read_from_buffer(user_buf, count, ppos, &dp->test.custom, + sizeof(dp->test.custom)); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return ret; +} + +static ssize_t zynqmp_dp_custom_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + char buf[sizeof(dp->test.custom)]; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (ret < 0) + goto out; + + mutex_lock(&dp->lock); + memcpy(dp->test.custom, buf, ret); + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_custom = { + .read = zynqmp_dp_custom_read, + .write = zynqmp_dp_custom_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_swing_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = dp->test.train_set[priv->lane] & DP_TRAIN_VOLTAGE_SWING_MASK; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_swing_set(void *data, u64 val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 3) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_SWING_REACHED | + DP_TRAIN_VOLTAGE_SWING_MASK); + *train_set |= val; + if (val == 3) + *train_set |= DP_TRAIN_MAX_SWING_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_swing, zynqmp_dp_swing_get, + zynqmp_dp_swing_set, "%llu\n"); + +static int zynqmp_dp_preemphasis_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, + dp->test.train_set[priv->lane]); + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_preemphasis_set(void *data, u64 val) +{ + 
struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 2) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | + DP_TRAIN_PRE_EMPHASIS_MASK); + *train_set |= val; + if (val == 2) + *train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_preemphasis, zynqmp_dp_preemphasis_get, + zynqmp_dp_preemphasis_set, "%llu\n"); + +static int zynqmp_dp_lanes_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.link_cnt; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_lanes_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + if (val > ZYNQMP_DP_MAX_LANES) + return -EINVAL; + + mutex_lock(&dp->lock); + if (val > dp->num_lanes) { + ret = -EINVAL; + } else { + dp->test.link_cnt = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + } + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_lanes, zynqmp_dp_lanes_get, + zynqmp_dp_lanes_set, "%llu\n"); + +static int zynqmp_dp_rate_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_rate_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int link_rate; + int ret = 0; + u8 bw_code; + + if (do_div(val, 10000)) + return -EINVAL; + + bw_code = drm_dp_link_rate_to_bw_code(val); + link_rate = drm_dp_bw_code_to_link_rate(bw_code); + if (val != link_rate) + return -EINVAL; + + if (bw_code != DP_LINK_BW_1_62 && bw_code != DP_LINK_BW_2_7 && + bw_code != DP_LINK_BW_5_4) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->test.bw_code = bw_code; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_rate, zynqmp_dp_rate_get, + zynqmp_dp_rate_set, "%llu\n"); + +static int zynqmp_dp_ignore_aux_errors_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->aux.hw_mutex); + *val = dp->ignore_aux_errors; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +static int zynqmp_dp_ignore_aux_errors_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->aux.hw_mutex); + dp->ignore_aux_errors = val; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_aux_errors, + zynqmp_dp_ignore_aux_errors_get, + zynqmp_dp_ignore_aux_errors_set, "%llu\n"); + +static int zynqmp_dp_ignore_hpd_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->ignore_hpd; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_ignore_hpd_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->ignore_hpd = val; + mutex_lock(&dp->lock); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_hpd, zynqmp_dp_ignore_hpd_get, + zynqmp_dp_ignore_hpd_set, "%llu\n"); + +static void zynqmp_dp_bridge_debugfs_init(struct drm_bridge *bridge, + struct dentry *root) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + struct dentry *test; + int i; + + 
dp->test.bw_code = DP_LINK_BW_5_4; + dp->test.link_cnt = dp->num_lanes; + + test = debugfs_create_dir("test", root); +#define CREATE_FILE(name) \ + debugfs_create_file(#name, 0600, test, dp, &fops_zynqmp_dp_##name) + CREATE_FILE(pattern); + CREATE_FILE(enhanced); + CREATE_FILE(downspread); + CREATE_FILE(active); + CREATE_FILE(custom); + CREATE_FILE(rate); + CREATE_FILE(lanes); + CREATE_FILE(ignore_aux_errors); + CREATE_FILE(ignore_hpd); + + for (i = 0; i < dp->num_lanes; i++) { + static const char fmt[] = "lane%d_preemphasis"; + char name[sizeof(fmt)]; + + dp->debugfs_train_set[i].dp = dp; + dp->debugfs_train_set[i].lane = i; + + snprintf(name, sizeof(name), fmt, i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_preemphasis); + + snprintf(name, sizeof(name), "lane%d_swing", i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_swing); + } +} + static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .attach = zynqmp_dp_bridge_attach, .detach = zynqmp_dp_bridge_detach, @@ -1618,6 +2334,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .detect = zynqmp_dp_bridge_detect, .edid_read = zynqmp_dp_bridge_edid_read, .atomic_get_input_bus_fmts = zynqmp_dp_bridge_get_input_bus_fmts, + .debugfs_init = zynqmp_dp_bridge_debugfs_init, }; /* ----------------------------------------------------------------------------- @@ -1651,10 +2368,46 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work) struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work); enum drm_connector_status status; - status = zynqmp_dp_bridge_detect(&dp->bridge); + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + status = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + drm_bridge_hpd_notify(&dp->bridge, status); } +static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work) +{ + struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, + hpd_irq_work); + u8 status[DP_LINK_STATUS_SIZE + 2]; + int err; + + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, + DP_LINK_STATUS_SIZE + 2); + if (err < 0) { + dev_dbg_ratelimited(dp->dev, + "could not read sink status: %d\n", err); + } else { + if (status[4] & DP_LINK_STATUS_UPDATED || + !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || + !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { + zynqmp_dp_train_loop(dp); + } + } + mutex_unlock(&dp->lock); +} + static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) { struct zynqmp_dp *dp = (struct zynqmp_dp *)data; @@ -1686,23 +2439,15 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) if (status & ZYNQMP_DP_INT_HPD_EVENT) schedule_work(&dp->hpd_work); - if (status & ZYNQMP_DP_INT_HPD_IRQ) { - int ret; - u8 status[DP_LINK_STATUS_SIZE + 2]; + if (status & ZYNQMP_DP_INT_HPD_IRQ) + schedule_work(&dp->hpd_irq_work); - ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, - DP_LINK_STATUS_SIZE + 2); - if (ret < 0) - goto handled; + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) + complete(&dp->aux_done); - if (status[4] & DP_LINK_STATUS_UPDATED || - !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || - !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { - zynqmp_dp_train_loop(dp); - } - } + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + complete(&dp->aux_done); -handled: return IRQ_HANDLED; } @@ -1725,8 
+2470,11 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) dp->dev = &pdev->dev; dp->dpsub = dpsub; dp->status = connector_status_disconnected; + mutex_init(&dp->lock); + init_completion(&dp->aux_done); INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func); + INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func); /* Acquire all resources (IOMEM, IRQ and PHYs). */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp"); @@ -1802,9 +2550,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) * Now that the hardware is initialized and won't generate spurious * interrupts, request the IRQ. */ - ret = devm_request_threaded_irq(dp->dev, dp->irq, NULL, - zynqmp_dp_irq_handler, IRQF_ONESHOT, - dev_name(dp->dev), dp); + ret = devm_request_irq(dp->dev, dp->irq, zynqmp_dp_irq_handler, + IRQF_SHARED, dev_name(dp->dev), dp); if (ret < 0) goto err_phy_exit; @@ -1829,8 +2576,9 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) struct zynqmp_dp *dp = dpsub->dp; zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL); - disable_irq(dp->irq); + devm_free_irq(dp->dev, dp->irq, dp); + cancel_work_sync(&dp->hpd_irq_work); cancel_work_sync(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0); @@ -1838,4 +2586,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) zynqmp_dp_phy_exit(dp); zynqmp_dp_reset(dp, true); + mutex_destroy(&dp->lock); } diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index bd1368df7870..fc81983d9e5e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -14,6 +14,7 @@ #include <drm/drm_blend.h> #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> @@ -402,6 +403,7 @@ static const struct drm_driver zynqmp_dpsub_drm_driver = { DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &zynqmp_dpsub_drm_fops, @@ -509,12 +511,12 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) if (ret) return ret; - drm_kms_helper_poll_init(drm); - ret = zynqmp_dpsub_kms_init(dpsub); if (ret < 0) goto err_poll_fini; + drm_kms_helper_poll_init(drm); + /* Reset all components and register the DRM device. */ drm_mode_config_reset(drm); @@ -523,7 +525,7 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) goto err_poll_fini; /* Initialize fbdev generic emulation. */ - drm_fbdev_dma_setup(drm, 24); + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888); return 0; @@ -536,7 +538,7 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub) { struct drm_device *drm = &dpsub->drm->dev; - drm_dev_unregister(drm); + drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); drm_encoder_cleanup(&dpsub->drm->encoder); drm_kms_helper_poll_fini(drm); |
