| author | Otavio Salvador <otavio@ossystems.com.br> | 2019-12-19 00:14:33 -0300 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2019-12-19 00:14:33 -0300 |
| commit | fa8a03bec68d9ef89da72277bd3501ed3daa6217 (patch) | |
| tree | 3d3218332bcb34cb0afa01d6ad996058a3dbcb77 /drivers/gpu | |
| parent | 6b774eec1f9d3064e9b33634dfa99d5666d0a73a (diff) | |
| parent | 1ddf624b0b268fdc0b80b1de618b98f8d117afea (diff) | |
Merge pull request #55 from toradex/4.14-2.0.x-imx
4.14 2.0.x imx
Diffstat (limited to 'drivers/gpu')
74 files changed, 1212 insertions, 420 deletions
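The first file below moves the amdgpu SI/CIK support checks from amdgpu_driver_load_kms() up into amdgpu_pci_probe(), so unsupported ASICs are declined before any device state is allocated and the radeon driver can bind instead. The following stand-alone C program is a minimal sketch of that guard logic, not the kernel code itself: the enum, the helper name probe_check(), and the hard-coded -19/-ENODEV value are simplified stand-ins chosen for illustration, while the chip list and the module-parameter override semantics mirror the diff.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ASIC identifiers. */
enum chip { CHIP_TAHITI, CHIP_PITCAIRN, CHIP_VERDE, CHIP_OLAND,
            CHIP_HAINAN, CHIP_OTHER };

/* Models the amdgpu.si_support module parameter (0 in this sketch,
 * i.e. SI parts are left to radeon unless overridden). */
static bool amdgpu_si_support;

/* Mirrors the probe-time guard added in amdgpu_pci_probe(): reject SI
 * chips early so no amdgpu device state is ever created for them. */
static int probe_check(enum chip c)
{
    if (!amdgpu_si_support) {
        switch (c) {
        case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE:
        case CHIP_OLAND:  case CHIP_HAINAN:
            fprintf(stderr, "SI support provided by radeon.\n");
            return -19; /* stands in for -ENODEV: probe declines */
        default:
            break;
        }
    }
    return 0; /* amdgpu proceeds with this device */
}

int main(void)
{
    printf("%d\n", probe_check(CHIP_OLAND)); /* -19 until overridden */
    amdgpu_si_support = true;                /* amdgpu.si_support=1 */
    printf("%d\n", probe_check(CHIP_OLAND)); /* 0: amdgpu binds */
    return 0;
}
```

Doing this check at probe time rather than in driver load means a declined device is never half-initialized, which is why the same block is deleted again from amdgpu_driver_load_kms() further down.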
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4dd68d821353..4894d8a87c04 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -572,6 +572,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, if (ret == -EPROBE_DEFER) return ret; +#ifdef CONFIG_DRM_AMDGPU_SI + if (!amdgpu_si_support) { + switch (flags & AMD_ASIC_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + dev_info(&pdev->dev, + "SI support provided by radeon.\n"); + dev_info(&pdev->dev, + "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" + ); + return -ENODEV; + } + } +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + if (!amdgpu_cik_support) { + switch (flags & AMD_ASIC_MASK) { + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: + dev_info(&pdev->dev, + "CIK support provided by radeon.\n"); + dev_info(&pdev->dev, + "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" + ); + return -ENODEV; + } + } +#endif + /* Get rid of things like offb */ ret = amdgpu_kick_out_firmware_fb(pdev); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e16229000a98..c93e72d8ac5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) struct amdgpu_device *adev; int r, acpi_status; -#ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - dev_info(dev->dev, - "SI support provided by radeon.\n"); - dev_info(dev->dev, - "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" - ); - return -ENODEV; - } - } -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: - dev_info(dev->dev, - "CIK support provided by radeon.\n"); - dev_info(dev->dev, - "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" - ); - return -ENODEV; - } - } -#endif - adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); if (adev == NULL) { return -ENOMEM; @@ -540,6 +505,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + if (info->read_mmr_reg.count > 128) + return -EINVAL; + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); if (!regs) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b2eecfc9042e..53186c5e1066 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1534,25 +1534,6 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v9_0_init_compute_vmid(adev); - - mutex_lock(&adev->grbm_idx_mutex); - /* - * making sure that the following register writes will be broadcasted - * to all the shaders - */ - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - - WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE, - (adev->gfx.config.sc_prim_fifo_size_frontend << - PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_prim_fifo_size_backend << - 
PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_hiz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_earlyz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); - mutex_unlock(&adev->grbm_idx_mutex); - } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 40520a968eac..28eea8317e87 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1783,7 +1783,7 @@ static void si_program_aspm(struct amdgpu_device *adev) if (orig != data) si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data); - if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) { + if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) { orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0); data &= ~PLL_RAMP_UP_TIME_0_MASK; if (orig != data) @@ -1832,14 +1832,14 @@ static void si_program_aspm(struct amdgpu_device *adev) orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL); data &= ~LS2_EXIT_TIME_MASK; - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) data |= LS2_EXIT_TIME(5); if (orig != data) si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data); orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL); data &= ~LS2_EXIT_TIME_MASK; - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) data |= LS2_EXIT_TIME(5); if (orig != data) si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 336fdd8c7db0..61141bc3edfe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3972,6 +3972,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) data->frame_time_x2 = frame_time_in_us * 2 / 100; + if (data->frame_time_x2 < 280) { + pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); + data->frame_time_x2 = 280; + } + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 72b22b805412..6f03700a94be 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc, clk_disable_unprepare(hdlcd->clk); } -static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) +static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - struct drm_display_mode *mode = &state->adjusted_mode; long rate, clk_rate = mode->clock * 1000; rate = clk_round_rate(hdlcd->clk, clk_rate); - if (rate != clk_rate) { + /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */ + if (abs(rate - clk_rate) * 1000 > clk_rate) { /* clock required by mode not supported by hardware */ - return -EINVAL; + return MODE_NOCLOCK; } - return 0; + return MODE_OK; } static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, @@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs 
hdlcd_crtc_helper_funcs = { - .atomic_check = hdlcd_crtc_atomic_check, + .mode_valid = hdlcd_crtc_mode_valid, .atomic_begin = hdlcd_crtc_atomic_begin, .atomic_enable = hdlcd_crtc_atomic_enable, .atomic_disable = hdlcd_crtc_atomic_disable, diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 373700c05a00..224fa1ef87ff 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) /* Enable extended register access */ - ast_enable_mmio(dev); ast_open_key(ast); + ast_enable_mmio(dev); /* Find out whether P2A works or whether to use device-tree */ ast_detect_config_mode(dev, &scu_rev); @@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; + /* enable standard VGA decode */ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); + ast_release_firmware(dev); kfree(ast->dp501_fw_addr); ast_mode_fini(dev); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 216c772ab821..6ed2124925b5 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -600,7 +600,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; ast_open_key(ast); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); ast_set_std_reg(crtc, adjusted_mode, &vbios_mode); ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index f7d421359d56..c1d1ac51d1c2 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 593e9a4e9ed6..58db4bbc7ab8 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -35,6 +35,7 @@ config DRM_DUMB_VGA_DAC config DRM_LVDS_ENCODER tristate "Transparent parallel to LVDS encoder support" depends on OF + select DRM_KMS_HELPER select DRM_PANEL_BRIDGE help Support for transparent parallel to LVDS encoders that don't require diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 60373d7eb220..109ab4c3df50 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -261,10 +261,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge, struct regmap *regmap = sii902x->regmap; u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; struct hdmi_avi_infoframe frame; + u16 pixel_clock_10kHz = adj->clock / 10; int ret; - buf[0] = adj->clock; - buf[1] = adj->clock >> 8; + buf[0] = pixel_clock_10kHz & 0xff; + buf[1] = pixel_clock_10kHz >> 8; buf[2] = adj->vrefresh; buf[3] = 0x00; buf[4] = adj->hdisplay; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 6eebd8ad0c52..cefa2c1685ba 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -300,7 +300,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct tc_data *tc = aux_to_tc(aux); - size_t size = min_t(size_t, 8, msg->size); + size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size); u8 request = msg->request & ~DP_AUX_I2C_MOT; u8 *buf = 
msg->buffer; u32 tmp = 0; @@ -1147,6 +1147,13 @@ static int tc_connector_get_modes(struct drm_connector *connector) struct tc_data *tc = connector_to_tc(connector); struct edid *edid; unsigned int count; + int ret; + + ret = tc_get_display_props(tc); + if (ret < 0) { + dev_err(tc->dev, "failed to read display props: %d\n", ret); + return 0; + } if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) { count = tc->panel->funcs->get_modes(tc->panel); diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index acb857030951..1bb01905be8e 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -64,7 +64,12 @@ static int tfp410_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return ret; + fallback: /* No EDID, fallback on the XGA standard modes */ ret = drm_add_modes_noedid(connector, 1920, 1200); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 6ae7fa23df9f..d341c3dc1676 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1321,7 +1321,10 @@ static int copy_one_buf(void *data, int count, struct drm_buf_entry *from) .size = from->buf_size, .low_mark = from->low_mark, .high_mark = from->high_mark}; - return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)); + + if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags))) + return -EFAULT; + return 0; } int drm_legacy_infobufs(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index f9e26dda56d6..2901b7944068 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -139,6 +139,7 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc) static void crtc_crc_cleanup(struct drm_crtc_crc *crc) { kfree(crc->entries); + crc->overflow = false; crc->entries = NULL; crc->head = 0; crc->tail = 0; @@ -359,12 +360,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, struct drm_crtc_crc *crc = &crtc->crc; struct drm_crtc_crc_entry *entry; int head, tail; + unsigned long flags; - spin_lock(&crc->lock); + spin_lock_irqsave(&crc->lock, flags); /* Caller may not have noticed yet that userspace has stopped reading */ if (!crc->entries) { - spin_unlock(&crc->lock); + spin_unlock_irqrestore(&crc->lock, flags); return -EINVAL; } @@ -372,8 +374,14 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, tail = crc->tail; if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) { - spin_unlock(&crc->lock); - DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n"); + bool was_overflow = crc->overflow; + + crc->overflow = true; + spin_unlock_irqrestore(&crc->lock, flags); + + if (!was_overflow) + DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n"); + return -ENOBUFS; } @@ -385,7 +393,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1); crc->head = head; - spin_unlock(&crc->lock); + spin_unlock_irqrestore(&crc->lock, flags); wake_up_interruptible(&crc->wq); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7a9b3a700fe4..c90f8680fb46 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -166,6 +166,9 @@ static const struct edid_quirk { /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + /* Lenovo 
G50 */ + { "SDC", 18514, EDID_QUIRK_FORCE_6BPC }, + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 1c0495acf341..06656acea420 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -274,6 +274,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector) * the last one found one as a fallback. */ fwstr = kstrdup(edid_firmware, GFP_KERNEL); + if (!fwstr) + return ERR_PTR(-ENOMEM); edidstr = fwstr; while ((edidname = strsep(&edidstr, ","))) { diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 55d92fa8b01c..bf5fff4ee98c 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -774,7 +774,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) struct drm_device *dev = fb->dev; struct drm_atomic_state *state; struct drm_plane *plane; - struct drm_connector *conn; + struct drm_connector *conn __maybe_unused; struct drm_connector_state *conn_state; int i, ret = 0; unsigned plane_mask; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index c55f338e380b..d2c042af36b8 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1035,6 +1035,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) return -EACCES; } + if (node->readonly) { + if (vma->vm_flags & VM_WRITE) { + drm_gem_object_put_unlocked(obj); + return -EINVAL; + } + + vma->vm_flags &= ~VM_MAYWRITE; + } + ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index f8e96e648acf..bfeeb6a56135 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -372,7 +372,10 @@ static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) .size = from->buf_size, .low_mark = from->low_mark, .high_mark = from->high_mark}; - return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)); + + if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags))) + return -EFAULT; + return 0; } static int drm_legacy_infobufs32(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 3bd43121bda9..baddc666d5b2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -593,6 +593,9 @@ static void output_poll_execute(struct work_struct *work) enum drm_connector_status old_status; bool repoll = false, changed; + if (!dev->mode_config.poll_enabled) + return; + /* Pick up any changes detected by the probe functions. 
*/ changed = dev->mode_config.delayed_event; dev->mode_config.delayed_event = false; @@ -747,7 +750,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init); */ void drm_kms_helper_poll_fini(struct drm_device *dev) { - drm_kms_helper_poll_disable(dev); + if (!dev->mode_config.poll_enabled) + return; + + dev->mode_config.poll_enabled = false; + cancel_delayed_work_sync(&dev->mode_config.output_poll_work); } EXPORT_SYMBOL(drm_kms_helper_poll_fini); diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 576a417690d4..128d6cfb7bbb 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -721,7 +721,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev, if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; if (sarea_priv->dirty) @@ -1041,7 +1041,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in if (u != I810_BUF_CLIENT) DRM_DEBUG("MC found buffer that isn't mine!\n"); - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; sarea_priv->dirty = 0x7f; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 8ba932b22f7c..e4b9eb1f6b60 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -26,6 +26,7 @@ */ #include "i915_drv.h" +#include "intel_ringbuffer.h" /** * DOC: batch buffer command parser @@ -50,13 +51,11 @@ * granting userspace undue privileges. There are three categories of privilege. * * First, commands which are explicitly defined as privileged or which should - * only be used by the kernel driver. The parser generally rejects such - * commands, though it may allow some from the drm master process. + * only be used by the kernel driver. The parser rejects such commands * * Second, commands which access registers. To support correct/enhanced * userspace functionality, particularly certain OpenGL extensions, the parser - * provides a whitelist of registers which userspace may safely access (for both - * normal and drm master processes). + * provides a whitelist of registers which userspace may safely access * * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc). * The parser always rejects such commands. @@ -81,9 +80,9 @@ * in the per-engine command tables. * * Other command table entries map fairly directly to high level categories - * mentioned above: rejected, master-only, register whitelist. The parser - * implements a number of checks, including the privileged memory checks, via a - * general bitmasking mechanism. + * mentioned above: rejected, register whitelist. The parser implements a number + * of checks, including the privileged memory checks, via a general bitmasking + * mechanism. */ /* @@ -101,8 +100,6 @@ struct drm_i915_cmd_descriptor { * CMD_DESC_REJECT: The command is never allowed * CMD_DESC_REGISTER: The command should be checked against the * register whitelist for the appropriate ring - * CMD_DESC_MASTER: The command is allowed if the submitting process - * is the DRM master */ u32 flags; #define CMD_DESC_FIXED (1<<0) @@ -110,7 +107,6 @@ struct drm_i915_cmd_descriptor { #define CMD_DESC_REJECT (1<<2) #define CMD_DESC_REGISTER (1<<3) #define CMD_DESC_BITMASK (1<<4) -#define CMD_DESC_MASTER (1<<5) /* * The command's unique identification bits and the bitmask to get them. @@ -191,7 +187,7 @@ struct drm_i915_cmd_table { #define CMD(op, opm, f, lm, fl, ...) 
\ { \ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ - .cmd = { (op), ~0u << (opm) }, \ + .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \ .length = { (lm) }, \ __VA_ARGS__ \ } @@ -206,14 +202,13 @@ struct drm_i915_cmd_table { #define R CMD_DESC_REJECT #define W CMD_DESC_REGISTER #define B CMD_DESC_BITMASK -#define M CMD_DESC_MASTER /* Command Mask Fixed Len Action ---------------------------------------------------------- */ -static const struct drm_i915_cmd_descriptor common_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = { CMD( MI_NOOP, SMI, F, 1, S ), CMD( MI_USER_INTERRUPT, SMI, F, 1, R ), - CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ), CMD( MI_ARB_CHECK, SMI, F, 1, S ), CMD( MI_REPORT_HEAD, SMI, F, 1, S ), CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), @@ -243,7 +238,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = { CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ), }; -static const struct drm_i915_cmd_descriptor render_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = { CMD( MI_FLUSH, SMI, F, 1, S ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_PREDICATE, SMI, F, 1, S ), @@ -310,7 +305,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_RS_CONTEXT, SMI, F, 1, S ), - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), @@ -327,7 +322,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ), }; -static const struct drm_i915_cmd_descriptor video_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -371,7 +366,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = { CMD( MFX_WAIT, SMFX, F, 1, S ), }; -static const struct drm_i915_cmd_descriptor vecs_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -409,7 +404,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = { }}, ), }; -static const struct drm_i915_cmd_descriptor blt_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = { CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B, .bits = {{ @@ -443,10 +438,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = { }; static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), }; +/* + * For Gen9 we can still rely on the h/w to enforce cmd security, and only + * need to re-enforce the register access checks. We therefore only need to + * teach the cmdparser how to find the end of each command, and identify + * register accesses. 
The table doesn't need to reject any commands, and so + * the only commands listed here are: + * 1) Those that touch registers + * 2) Those that do not have the default 8-bit length + * + * Note that the default MI length mask chosen for this table is 0xFF, not + * the 0x3F used on older devices. This is because the vast majority of MI + * cmds on Gen9 use a standard 8-bit Length field. + * All the Gen9 blitter instructions are standard 0xFF length mask, and + * none allow access to non-general registers, so in fact no BLT cmds are + * included in the table at all. + * + */ +static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = { + CMD( MI_NOOP, SMI, F, 1, S ), + CMD( MI_USER_INTERRUPT, SMI, F, 1, S ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ), + CMD( MI_FLUSH, SMI, F, 1, S ), + CMD( MI_ARB_CHECK, SMI, F, 1, S ), + CMD( MI_REPORT_HEAD, SMI, F, 1, S ), + CMD( MI_ARB_ON_OFF, SMI, F, 1, S ), + CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ), + CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ), + CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ), + CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ), + CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), + + /* + * We allow BB_START but apply further checks. We just sanitize the + * basic fields here. + */ +#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0) +#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1) + CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B, + .bits = {{ + .offset = 0, + .mask = MI_BB_START_OPERAND_MASK, + .expected = MI_BB_START_OPERAND_EXPECT, + }}, ), +}; + static const struct drm_i915_cmd_descriptor noop_desc = CMD(MI_NOOP, SMI, F, 1, S); @@ -460,40 +509,44 @@ static const struct drm_i915_cmd_descriptor noop_desc = #undef R #undef W #undef B -#undef M -static const struct drm_i915_cmd_table gen7_render_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table gen7_render_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, }; -static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) }, }; -static const struct drm_i915_cmd_table gen7_video_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { video_cmds, ARRAY_SIZE(video_cmds) }, +static const struct drm_i915_cmd_table gen7_video_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) }, }; -static const struct drm_i915_cmd_table hsw_vebox_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { vecs_cmds, ARRAY_SIZE(vecs_cmds) }, +static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) }, }; -static 
const struct drm_i915_cmd_table gen7_blt_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, }; -static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) }, }; +static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = { + { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) }, +}; + + /* * Register whitelists, sorted by increasing register offset. */ @@ -609,17 +662,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; -static const struct drm_i915_reg_descriptor ivb_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)), -}; - -static const struct drm_i915_reg_descriptor hsw_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), +static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), + REG64_IDX(BCS_GPR, 3), + REG64_IDX(BCS_GPR, 4), + REG64_IDX(BCS_GPR, 5), + REG64_IDX(BCS_GPR, 6), + REG64_IDX(BCS_GPR, 7), + REG64_IDX(BCS_GPR, 8), + REG64_IDX(BCS_GPR, 9), + REG64_IDX(BCS_GPR, 10), + REG64_IDX(BCS_GPR, 11), + REG64_IDX(BCS_GPR, 12), + REG64_IDX(BCS_GPR, 13), + REG64_IDX(BCS_GPR, 14), + REG64_IDX(BCS_GPR, 15), }; #undef REG64 @@ -628,28 +691,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = { struct drm_i915_reg_table { const struct drm_i915_reg_descriptor *regs; int num_regs; - bool master; }; static const struct drm_i915_reg_table ivb_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, }; static const struct drm_i915_reg_table ivb_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, }; static const struct drm_i915_reg_table hsw_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, + { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) }, }; static const struct drm_i915_reg_table hsw_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, +}; + +static const struct drm_i915_reg_table gen9_blt_reg_tables[] = { + { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) }, }; static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) @@ -707,6 +769,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) return 0; } +static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header) +{ + u32 client = 
cmd_header >> INSTR_CLIENT_SHIFT; + + if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT) + return 0xFF; + + DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header); + return 0; +} + static bool validate_cmds_sorted(const struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, int cmd_table_count) @@ -798,22 +871,15 @@ struct cmd_node { */ static inline u32 cmd_header_key(u32 x) { - u32 shift; - switch (x >> INSTR_CLIENT_SHIFT) { default: case INSTR_MI_CLIENT: - shift = STD_MI_OPCODE_SHIFT; - break; + return x >> STD_MI_OPCODE_SHIFT; case INSTR_RC_CLIENT: - shift = STD_3D_OPCODE_SHIFT; - break; + return x >> STD_3D_OPCODE_SHIFT; case INSTR_BC_CLIENT: - shift = STD_2D_OPCODE_SHIFT; - break; + return x >> STD_2D_OPCODE_SHIFT; } - - return x >> shift; } static int init_hash_table(struct intel_engine_cs *engine, @@ -871,18 +937,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->i915)) + if (!IS_GEN7(engine->i915) && !(IS_GEN9(engine->i915) && + engine->id == BCS)) return; switch (engine->id) { case RCS: if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_render_ring_cmds; + cmd_tables = hsw_render_ring_cmd_table; cmd_table_count = - ARRAY_SIZE(hsw_render_ring_cmds); + ARRAY_SIZE(hsw_render_ring_cmd_table); } else { - cmd_tables = gen7_render_cmds; - cmd_table_count = ARRAY_SIZE(gen7_render_cmds); + cmd_tables = gen7_render_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table); } if (IS_HASWELL(engine->i915)) { @@ -892,36 +959,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->reg_tables = ivb_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables); } - engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; case VCS: - cmd_tables = gen7_video_cmds; - cmd_table_count = ARRAY_SIZE(gen7_video_cmds); + cmd_tables = gen7_video_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table); engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_blt_ring_cmds; - cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); + engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; + if (IS_GEN9(engine->i915)) { + cmd_tables = gen9_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table); + engine->get_cmd_length_mask = + gen9_blt_get_cmd_length_mask; + + /* BCS Engine unsafe without parser */ + engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER; + } else if (IS_HASWELL(engine->i915)) { + cmd_tables = hsw_blt_ring_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table); } else { - cmd_tables = gen7_blt_cmds; - cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); + cmd_tables = gen7_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table); } - if (IS_HASWELL(engine->i915)) { + if (IS_GEN9(engine->i915)) { + engine->reg_tables = gen9_blt_reg_tables; + engine->reg_table_count = + ARRAY_SIZE(gen9_blt_reg_tables); + } else if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { engine->reg_tables = ivb_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables); } - - engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; case VECS: - cmd_tables = hsw_vebox_cmds; - cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); + cmd_tables = hsw_vebox_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table); /* VECS can use the same 
length_mask function as VCS */ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; @@ -947,7 +1024,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) return; } - engine->needs_cmd_parser = true; + engine->flags |= I915_ENGINE_USING_CMD_PARSER; } /** @@ -959,7 +1036,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) */ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine) { - if (!engine->needs_cmd_parser) + if (!intel_engine_using_cmd_parser(engine)) return; fini_hash_table(engine); @@ -1033,22 +1110,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) } static const struct drm_i915_reg_descriptor * -find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr) +find_reg(const struct intel_engine_cs *engine, u32 addr) { const struct drm_i915_reg_table *table = engine->reg_tables; + const struct drm_i915_reg_descriptor *reg = NULL; int count = engine->reg_table_count; - do { - if (!table->master || is_master) { - const struct drm_i915_reg_descriptor *reg; - - reg = __find_reg(table->regs, table->num_regs, addr); - if (reg != NULL) - return reg; - } - } while (table++, --count); + for (; !reg && (count > 0); ++table, --count) + reg = __find_reg(table->regs, table->num_regs, addr); - return NULL; + return reg; } /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ @@ -1133,8 +1204,7 @@ unpin_src: static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, - const u32 *cmd, u32 length, - const bool is_master) + const u32 *cmd, u32 length) { if (desc->flags & CMD_DESC_SKIP) return true; @@ -1144,12 +1214,6 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } - if ((desc->flags & CMD_DESC_MASTER) && !is_master) { - DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n", - *cmd); - return false; - } - if (desc->flags & CMD_DESC_REGISTER) { /* * Get the distance between individual register offset @@ -1163,7 +1227,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg(engine, is_master, reg_addr); + find_reg(engine, reg_addr); if (!reg) { DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n", @@ -1218,6 +1282,12 @@ static bool check_cmd(const struct intel_engine_cs *engine, continue; } + if (desc->bits[i].offset >= length) { + DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n", + *cmd, engine->name); + return false; + } + dword = cmd[desc->bits[i].offset] & desc->bits[i].mask; @@ -1235,16 +1305,112 @@ static bool check_cmd(const struct intel_engine_cs *engine, return true; } +static int check_bbstart(const struct i915_gem_context *ctx, + u32 *cmd, u32 offset, u32 length, + u32 batch_len, + u64 batch_start, + u64 shadow_batch_start) +{ + u64 jump_offset, jump_target; + u32 target_cmd_offset, target_cmd_index; + + /* For igt compatibility on older platforms */ + if (CMDPARSER_USES_GGTT(ctx->i915)) { + DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n"); + return -EACCES; + } + + if (length != 3) { + DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n", + length); + return -EINVAL; + } + + jump_target = *(u64*)(cmd+1); + jump_offset = jump_target - batch_start; + + /* + * Any underflow of jump_target is guaranteed to be outside the range + * of a u32, so >= test catches both too large and too 
small + */ + if (jump_offset >= batch_len) { + DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n", + jump_target); + return -EINVAL; + } + + /* + * This cannot overflow a u32 because we already checked jump_offset + * is within the BB, and the batch_len is a u32 + */ + target_cmd_offset = lower_32_bits(jump_offset); + target_cmd_index = target_cmd_offset / sizeof(u32); + + *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset; + + if (target_cmd_index == offset) + return 0; + + if (ctx->jump_whitelist_cmds <= target_cmd_index) { + DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n"); + return -EINVAL; + } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) { + DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n", + jump_target); + return -EINVAL; + } + + return 0; +} + +static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len) +{ + const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32)); + const u32 exact_size = BITS_TO_LONGS(batch_cmds); + u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds)); + unsigned long *next_whitelist; + + if (CMDPARSER_USES_GGTT(ctx->i915)) + return; + + if (batch_cmds <= ctx->jump_whitelist_cmds) { + bitmap_zero(ctx->jump_whitelist, batch_cmds); + return; + } + +again: + next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL); + if (next_whitelist) { + kfree(ctx->jump_whitelist); + ctx->jump_whitelist = next_whitelist; + ctx->jump_whitelist_cmds = + next_size * BITS_PER_BYTE * sizeof(long); + return; + } + + if (next_size > exact_size) { + next_size = exact_size; + goto again; + } + + DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n"); + bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds); + + return; +} + #define LENGTH_BIAS 2 /** * i915_parse_cmds() - parse a submitted batch buffer for privilege violations + * @ctx: the context in which the batch is to execute * @engine: the engine on which the batch is to execute * @batch_obj: the batch buffer in question - * @shadow_batch_obj: copy of the batch buffer in question + * @batch_start: Canonical base address of batch * @batch_start_offset: byte offset in the batch at which execution starts * @batch_len: length of the commands in batch_obj - * @is_master: is the submitting process the drm master? + * @shadow_batch_obj: copy of the batch buffer in question + * @shadow_batch_start: Canonical base address of shadow_batch_obj * * Parses the specified batch buffer looking for privilege violations as * described in the overview. 
@@ -1252,14 +1418,17 @@ static bool check_cmd(const struct intel_engine_cs *engine, * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing */ -int intel_engine_cmd_parser(struct intel_engine_cs *engine, + +int intel_engine_cmd_parser(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master) + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start) { - u32 *cmd, *batch_end; + u32 *cmd, *batch_end, offset = 0; struct drm_i915_cmd_descriptor default_desc = noop_desc; const struct drm_i915_cmd_descriptor *desc = &default_desc; bool needs_clflush_after = false; @@ -1273,6 +1442,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, return PTR_ERR(cmd); } + init_whitelist(ctx, batch_len); + /* * We use the batch length as size because the shadow object is as * large or larger and copy_batch() will write MI_NOPs to the extra @@ -1282,31 +1453,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, do { u32 length; - if (*cmd == MI_BATCH_BUFFER_END) { - if (needs_clflush_after) { - void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); - drm_clflush_virt_range(ptr, - (void *)(cmd + 1) - ptr); - } + if (*cmd == MI_BATCH_BUFFER_END) break; - } desc = find_cmd(engine, *cmd, desc, &default_desc); if (!desc) { DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", *cmd); ret = -EINVAL; - break; - } - - /* - * If the batch buffer contains a chained batch, return an - * error that tells the caller to abort and dispatch the - * workload as a non-secure batch. - */ - if (desc->cmd.value == MI_BATCH_BUFFER_START) { - ret = -EACCES; - break; + goto err; } if (desc->flags & CMD_DESC_FIXED) @@ -1320,22 +1475,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, length, batch_end - cmd); ret = -EINVAL; - break; + goto err; } - if (!check_cmd(engine, desc, cmd, length, is_master)) { + if (!check_cmd(engine, desc, cmd, length)) { ret = -EACCES; + goto err; + } + + if (desc->cmd.value == MI_BATCH_BUFFER_START) { + ret = check_bbstart(ctx, cmd, offset, length, + batch_len, batch_start, + shadow_batch_start); + + if (ret) + goto err; break; } + if (ctx->jump_whitelist_cmds > offset) + set_bit(offset, ctx->jump_whitelist); + cmd += length; + offset += length; if (cmd >= batch_end) { DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); ret = -EINVAL; - break; + goto err; } } while (1); + if (needs_clflush_after) { + void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); + + drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr); + } + +err: i915_gem_object_unpin_map(shadow_batch_obj); return ret; } @@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) /* If the command parser is not enabled, report 0 - unsupported */ for_each_engine(engine, dev_priv, id) { - if (engine->needs_cmd_parser) { + if (intel_engine_using_cmd_parser(engine)) { active = true; break; } @@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) * the parser enabled. * 9. Don't whitelist or handle oacontrol specially, as ownership * for oacontrol state is moving to i915-perf. + * 10. 
Support for Gen9 BCS Parsing */ - return 9; + return 10; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c75f4ccbcdef..02a2af7c8166 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -323,7 +323,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = i915.semaphores; break; case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); + value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN); break; case I915_PARAM_CMD_PARSER_VERSION: value = i915_cmd_parser_get_version(dev_priv); @@ -1564,6 +1564,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) disable_rpm_wakeref_asserts(dev_priv); intel_display_set_init_power(dev_priv, false); + i915_rc6_ctx_wa_suspend(dev_priv); fw_csr = !IS_GEN9_LP(dev_priv) && suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; @@ -1800,6 +1801,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_display_set_init_power(dev_priv, true); i915_gem_sanitize(dev_priv); + i915_rc6_ctx_wa_resume(dev_priv); enable_rpm_wakeref_asserts(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 41f51509c9e4..a5fb7404b29e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1320,6 +1320,7 @@ struct intel_gen6_power_mgmt { enum { LOW_POWER, BETWEEN, HIGH_POWER } power; bool enabled; + bool ctx_corrupted; struct delayed_work autoenable_work; atomic_t num_waiters; atomic_t boosts; @@ -2980,6 +2981,12 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv)) +/* + * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution + * All later gens can run the final buffer from the ppgtt + */ +#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv) + #define ENGINE_MASK(id) BIT(id) #define RENDER_RING ENGINE_MASK(RCS) #define BSD_RING ENGINE_MASK(VCS) @@ -2996,6 +3003,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) +#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6) + #define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) #define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) @@ -3017,9 +3026,12 @@ intel_info(const struct drm_i915_private *dev_priv) /* Early gen2 have a totally busted CS tlb and require pinned batches. 
*/ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) +#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ + (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) == 9) + /* WaRsDisableCoarsePowerGating:skl,bxt */ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) + (INTEL_GEN(dev_priv) == 9) /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts @@ -3391,6 +3403,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 alignment, u64 flags); +struct i915_vma * __must_check +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags); + int i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); @@ -3841,12 +3861,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -int intel_engine_cmd_parser(struct intel_engine_cs *engine, +int intel_engine_cmd_parser(struct i915_gem_context *cxt, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 user_batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master); + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start); /* i915_perf.c */ extern void i915_perf_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 727018a16cca..9263b65720bc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1834,6 +1834,10 @@ int i915_gem_fault(struct vm_fault *vmf) unsigned int flags; int ret; + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) + return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; @@ -3239,6 +3243,12 @@ i915_gem_idle_work_handler(struct work_struct *work) if (INTEL_GEN(dev_priv) >= 6) gen6_rps_idle(dev_priv); + + if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) { + i915_rc6_ctx_wa_check(dev_priv); + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + } + intel_runtime_pm_put(dev_priv); out_unlock: mutex_unlock(&dev->struct_mutex); @@ -3996,6 +4006,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_address_space *vm = &dev_priv->ggtt.base; + + return i915_gem_object_pin(obj, vm, view, size, alignment, + flags | PIN_GLOBAL); +} + +struct i915_vma * +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; int ret; @@ -4053,7 +4077,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(ret); } - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + ret = i915_vma_pin(vma, size, alignment, flags); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8afd2ce59b8d..3925a63c1661 100644 --- 
a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -141,6 +141,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) __i915_gem_object_release_unless_active(ce->state->obj); } + kfree(ctx->jump_whitelist); + kfree(ctx->name); put_pid(ctx->pid); @@ -321,6 +323,9 @@ __create_hw_context(struct drm_i915_private *dev_priv, else ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; + ctx->jump_whitelist = NULL; + ctx->jump_whitelist_cmds = 0; + return ctx; err_pid: diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 44688e22a5c2..b651c5f427b9 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -181,6 +181,12 @@ struct i915_gem_context { /** remap_slice: Bitmask of cache lines that need remapping */ u8 remap_slice; + /** jump_whitelist: Bit array for tracking cmds during cmdparsing */ + unsigned long *jump_whitelist; + + /** jump_whitelist_cmds: No of cmd slots available */ + u32 jump_whitelist_cmds; + /** handles_vma: rbtree to look up our context specific obj/vma for * the user handle. (user handles are per fd, but the binding is * per vm, which may be one per context or shared with the global GTT) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 4cc9ce4b5b16..d99d05a91032 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -268,6 +268,13 @@ static inline u64 gen8_noncanonical_addr(u64 address) return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); } +static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) +{ + return intel_engine_requires_cmd_parser(eb->engine) || + (intel_engine_using_cmd_parser(eb->engine) && + eb->args->batch_len); +} + static int eb_create(struct i915_execbuffer *eb) { if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { @@ -1165,6 +1172,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, if (unlikely(!cache->rq)) { int err; + /* If we need to copy for the cmdparser, we will stall anyway */ + if (eb_use_cmdparser(eb)) + return ERR_PTR(-EWOULDBLOCK); + err = __reloc_gpu_alloc(eb, vma, len); if (unlikely(err)) return ERR_PTR(err); @@ -1902,10 +1913,38 @@ static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) return 0; } -static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) +static struct i915_vma * +shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = eb->i915; + struct i915_address_space *vm; + u64 flags; + + /* + * PPGTT backed shadow buffers must be mapped RO, to prevent + * post-scan tampering + */ + if (CMDPARSER_USES_GGTT(dev_priv)) { + flags = PIN_GLOBAL; + vm = &dev_priv->ggtt.base; + } else if (eb->vm->has_read_only) { + flags = PIN_USER; + vm = eb->vm; + i915_gem_object_set_readonly(obj); + } else { + DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n"); + return ERR_PTR(-EINVAL); + } + + return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags); +} + +static struct i915_vma *eb_parse(struct i915_execbuffer *eb) { struct drm_i915_gem_object *shadow_batch_obj; struct i915_vma *vma; + u64 batch_start; + u64 shadow_batch_start; int err; shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, @@ -1913,29 +1952,53 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) if (IS_ERR(shadow_batch_obj)) return ERR_CAST(shadow_batch_obj); - err = 
intel_engine_cmd_parser(eb->engine, + vma = shadow_batch_pin(eb, shadow_batch_obj); + if (IS_ERR(vma)) + goto out; + + batch_start = gen8_canonical_addr(eb->batch->node.start) + + eb->batch_start_offset; + + shadow_batch_start = gen8_canonical_addr(vma->node.start); + + err = intel_engine_cmd_parser(eb->ctx, + eb->engine, eb->batch->obj, - shadow_batch_obj, + batch_start, eb->batch_start_offset, eb->batch_len, - is_master); + shadow_batch_obj, + shadow_batch_start); + if (err) { - if (err == -EACCES) /* unhandled chained batch */ + i915_vma_unpin(vma); + + /* + * Unsafe GGTT-backed buffers can still be submitted safely + * as non-secure. + * For PPGTT backing however, we have no choice but to forcibly + * reject unsafe buffers + */ + if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES)) + /* Execute original buffer non-secure */ vma = NULL; else vma = ERR_PTR(err); - goto out; - } - vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) goto out; + } eb->vma[eb->buffer_count] = i915_vma_get(vma); eb->flags[eb->buffer_count] = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF; vma->exec_flags = &eb->flags[eb->buffer_count]; eb->buffer_count++; + eb->batch_start_offset = 0; + eb->batch = vma; + /* eb->batch_len unchanged */ + + if (CMDPARSER_USES_GGTT(eb->i915)) + eb->batch_flags |= I915_DISPATCH_SECURE; out: i915_gem_object_unpin_pages(shadow_batch_obj); @@ -2186,6 +2249,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct drm_i915_gem_exec_object2 *exec, struct drm_syncobj **fences) { + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct sync_file *out_fence = NULL; @@ -2195,7 +2259,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); - eb.i915 = to_i915(dev); + eb.i915 = dev_priv; eb.file = file; eb.args = args; if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) @@ -2217,8 +2281,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { + if (INTEL_GEN(dev_priv) >= 11) + return -ENODEV; + + /* Return -EPERM to trigger fallback code on old binaries. */ + if (!HAS_SECURE_BATCHES(dev_priv)) + return -EPERM; + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) - return -EPERM; + return -EPERM; eb.batch_flags |= I915_DISPATCH_SECURE; } @@ -2305,34 +2376,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } - if (eb.engine->needs_cmd_parser && eb.batch_len) { + if (eb.batch_len == 0) + eb.batch_len = eb.batch->size - eb.batch_start_offset; + + if (eb_use_cmdparser(&eb)) { struct i915_vma *vma; - vma = eb_parse(&eb, drm_is_current_master(file)); + vma = eb_parse(&eb); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_vma; } - - if (vma) { - /* - * Batch parsed and accepted: - * - * Set the DISPATCH_SECURE bit to remove the NON_SECURE - * bit from MI_BATCH_BUFFER_START commands issued in - * the dispatch_execbuffer implementations. We - * specifically don't want that set on batches the - * command parser has accepted. - */ - eb.batch_flags |= I915_DISPATCH_SECURE; - eb.batch_start_offset = 0; - eb.batch = vma; - } } - if (eb.batch_len == 0) - eb.batch_len = eb.batch->size - eb.batch_start_offset; - /* * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. 
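The execbuffer changes above are the core of the Gen9 blitter command-parser rework: parsing becomes mandatory on engines flagged I915_ENGINE_REQUIRES_CMD_PARSER, the shadow batch is pinned read-only in the PPGTT on Gen8+, and only the legacy Gen7 GGTT path may still fall back to executing the original batch non-secure when the parser returns -EACCES. The stand-alone C sketch below models that decision flow under stated assumptions: the struct, the helper names, and the hard-coded -13/-EACCES value are simplified stand-ins for illustration, while the gen split and the fallback rule mirror eb_use_cmdparser(), CMDPARSER_USES_GGTT() and eb_parse() in the diff.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the engine flags used in the diff. */
struct engine {
    bool using_cmd_parser;    /* I915_ENGINE_USING_CMD_PARSER */
    bool requires_cmd_parser; /* I915_ENGINE_REQUIRES_CMD_PARSER (Gen9 BCS) */
};

/* Gen7 copies the scanned batch into the GGTT; Gen8+ runs it from the
 * PPGTT, which is why full PPGTT is forced on Gen9 elsewhere in the diff. */
static bool cmdparser_uses_ggtt(int gen) { return gen == 7; }

/* Mirrors eb_use_cmdparser(): parse when the engine requires it, or when
 * it merely uses the parser and userspace supplied a non-zero batch_len. */
static bool use_cmdparser(const struct engine *e, unsigned batch_len)
{
    return e->requires_cmd_parser || (e->using_cmd_parser && batch_len);
}

/* Outcome of eb_parse() once the parser has returned `err`. */
static const char *parse_outcome(int gen, int err)
{
    if (!err)
        return cmdparser_uses_ggtt(gen)
            ? "run shadow batch as SECURE from the GGTT"
            : "run read-only shadow batch from the PPGTT";
    /* Only the GGTT-backed path may still execute the original
     * buffer non-secure; PPGTT-backed parsing must hard-reject. */
    if (cmdparser_uses_ggtt(gen) && err == -13 /* stands in for -EACCES */)
        return "fall back: execute original batch non-secure";
    return "reject submission";
}

int main(void)
{
    struct engine gen9_bcs = { .using_cmd_parser = true,
                               .requires_cmd_parser = true };
    printf("%d\n", use_cmdparser(&gen9_bcs, 0)); /* 1: always parsed */
    printf("%s\n", parse_outcome(9, -13));       /* reject, no fallback */
    printf("%s\n", parse_outcome(7, -13));       /* legacy fallback */
    return 0;
}
```

The asymmetry in parse_outcome() is the key design point: a GGTT shadow batch runs with the SECURE bit and so can safely fall back, whereas a PPGTT batch has no such containment, so an unparseable buffer must be refused outright.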
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ad524cb0f6fc..47d178817a29 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -159,7 +159,8 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) return 0; - if (enable_ppgtt == 1) + /* Full PPGTT is required by the Gen9 cmdparser */ + if (enable_ppgtt == 1 && INTEL_GEN(dev_priv) != 9) return 1; if (enable_ppgtt == 2 && has_full_ppgtt) @@ -207,9 +208,9 @@ static int ppgtt_bind_vma(struct i915_vma *vma, vma->pages = vma->obj->mm.pages; - /* Currently applicable only to VLV */ + /* Applicable to VLV, and gen8+ */ pte_flags = 0; - if (vma->obj->gt_ro) + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); @@ -223,10 +224,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma) } static gen8_pte_t gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + u32 flags) { - gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW; - pte |= addr; + gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + + if (unlikely(flags & PTE_READ_ONLY)) + pte &= ~_PAGE_RW; switch (level) { case I915_CACHE_NONE: @@ -487,7 +491,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm, struct i915_page_table *pt) { fill_px(vm, pt, - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC)); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0)); } static void gen6_initialize_pt(struct i915_address_space *vm, @@ -691,7 +695,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, unsigned int pte = gen8_pte_index(start); unsigned int pte_end = pte + num_entries; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); gen8_pte_t *vaddr; GEM_BUG_ON(num_entries > pt->used_ptes); @@ -863,10 +867,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, struct i915_page_directory_pointer *pdp, struct sgt_dma *iter, struct gen8_insert_pte *idx, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, + u32 flags) { struct i915_page_directory *pd; - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); gen8_pte_t *vaddr; bool ret; @@ -917,20 +922,20 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, - cache_level); + cache_level, flags); } static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); @@ -938,7 +943,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter, - &idx, cache_level)) + &idx, cache_level, flags)) GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); } @@ -1264,7 +1269,7 @@ static void 
gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) { struct i915_address_space *vm = &ppgtt->base; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); u64 start = 0, length = ppgtt->base.total; if (use_4lvl(vm)) { @@ -1339,6 +1344,13 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) return ret; } + /* + * From bdw, there is support for read-only pages in the PPGTT. + * + * XXX GVT is not honouring the lack of RW in the PTE bits. + */ + ppgtt->base.has_read_only = !intel_vgpu_active(dev_priv); + /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. */ @@ -2078,7 +2090,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, gen8_pte_t __iomem *pte = (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); - gen8_set_pte(pte, gen8_pte_encode(addr, level)); + gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); ggtt->invalidate(vm->i915); } @@ -2086,14 +2098,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level level, - u32 unused) + u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct sgt_iter sgt_iter; gen8_pte_t __iomem *gtt_entries; - const gen8_pte_t pte_encode = gen8_pte_encode(0, level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); dma_addr_t addr; + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. + */ + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; gtt_entries += vma->node.start >> PAGE_SHIFT; for_each_sgt_dma(addr, sgt_iter, vma->pages) @@ -2162,7 +2179,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); gen8_pte_t __iomem *gtt_base = (gen8_pte_t __iomem *)ggtt->gsm + first_entry; const int max_entries = ggtt_total_entries(ggtt) - first_entry; @@ -2223,13 +2240,14 @@ struct insert_entries { struct i915_address_space *vm; struct i915_vma *vma; enum i915_cache_level level; + u32 flags; }; static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) { struct insert_entries *arg = _arg; - gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0); + gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2238,9 +2256,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level level, - u32 unused) + u32 flags) { - struct insert_entries arg = { vm, vma, level }; + struct insert_entries arg = { vm, vma, level, flags }; stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); } @@ -2337,9 +2355,9 @@ static int ggtt_bind_vma(struct i915_vma *vma, return ret; } - /* Currently applicable only to VLV */ + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ pte_flags = 0; - if (obj->gt_ro) + if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; intel_runtime_pm_get(i915); @@ -2381,7 +2399,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, /* Currently applicable only to VLV */ pte_flags = 0; - if 
(vma->obj->gt_ro) + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; if (flags & I915_VMA_LOCAL_BIND) { @@ -3063,6 +3081,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) */ mutex_lock(&dev_priv->drm.struct_mutex); i915_address_space_init(&ggtt->base, dev_priv, "[global]"); + + /* Only VLV supports read-only GGTT mappings */ + ggtt->base.has_read_only = IS_VALLEYVIEW(dev_priv); + if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) ggtt->base.mm.color_adjust = i915_gtt_color_adjust; mutex_unlock(&dev_priv->drm.struct_mutex); @@ -3095,7 +3117,6 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) { if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) return -EIO; - return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 0dbbe840f5f0..4b63d6cbd81e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -295,7 +295,12 @@ struct i915_address_space { struct list_head unbound_list; struct pagevec free_pages; - bool pt_kmap_wc; + + /* Some systems require uncached updates of the page directories */ + bool pt_kmap_wc:1; + + /* Some systems support read-only mappings for GGTT and/or PPGTT */ + bool has_read_only:1; /* FIXME: Need a more generic return type */ gen6_pte_t (*pte_encode)(dma_addr_t addr, diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index c30d8f808185..39cfe04dcdb8 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -140,7 +140,6 @@ struct drm_i915_gem_object { * Is the object to be mapped as read-only to the GPU * Only honoured if hardware has relevant pte bit */ - unsigned long gt_ro:1; unsigned int cache_level:3; unsigned int cache_coherent:2; #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0) @@ -313,6 +312,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) reservation_object_unlock(obj->resv); } +static inline void +i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) +{ + obj->base.vma_node.readonly = true; +} + +static inline bool +i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) +{ + return obj->base.vma_node.readonly; +} + static inline bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 813a3b546d6e..1d556dcbd656 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -252,6 +252,10 @@ static void mark_busy(struct drm_i915_private *i915) GEM_BUG_ON(!i915->gt.active_requests); intel_runtime_pm_get_noresume(i915); + + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + i915->gt.awake = true; intel_enable_gt_powersave(i915); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 05ae8c4a8a1b..480d20758324 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -690,8 +690,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, i915_gem_gtt_finish_pages(obj, pages); for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) + if (obj->mm.dirty && trylock_page(page)) { + /* + * As this may not be anonymous memory (e.g. 
shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + * + * However...! + * + * The mmu-notifier can be invalidated for a + * migrate_page, that is already holding the lock + * on the page. Such a try_to_unmap() will result + * in us calling put_pages() and so recursively try + * to lock the page. We avoid that deadlock with + * a trylock_page() and in exchange we risk missing + * some page dirtying. + */ set_page_dirty(page); + unlock_page(page); + } mark_page_accessed(page); put_page(page); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2e706f1abe64..1db70350af0b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -358,6 +358,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_CONFIG0 _MMIO(0xD00) #define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1) +#define GEN8_RC6_CTX_INFO _MMIO(0x8504) + #define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1<<13) #define ECOBITS_PPGTT_CACHE64B (3<<8) @@ -703,6 +705,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) */ #define BCS_SWCTRL _MMIO(0x22200) +/* There are 16 GPR registers */ +#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) +#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) + #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) #define HS_INVOCATION_COUNT _MMIO(0x2300) @@ -6722,6 +6728,10 @@ enum { #define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C) #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038) +/* Display Internal Timeout Register */ +#define RM_TIMEOUT _MMIO(0x42060) +#define MMIO_TIMEOUT_US(us) ((us) << 0) + /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) #define DE_SPRITEB_FLIP_DONE (1 << 29) diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 5fe9f3f39467..e13ddd559f79 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -100,6 +100,9 @@ static struct _balloon_info_ bl_info; static void vgt_deballoon_space(struct i915_ggtt *ggtt, struct drm_mm_node *node) { + if (!drm_mm_node_allocated(node)) + return; + DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", node->start, node->start + node->size, diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 92c1f8e166dc..0bdbbd4027fe 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -277,10 +277,17 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, uint32_t i; uint32_t *dmc_payload; uint32_t required_version; + size_t fsize; if (!fw) return NULL; + fsize = sizeof(struct intel_css_header) + + sizeof(struct intel_package_header) + + sizeof(struct intel_dmc_header); + if (fsize > fw->size) + goto error_truncated; + /* Extract CSS Header information*/ css_header = (struct intel_css_header *)fw->data; if (sizeof(struct intel_css_header) != @@ -350,6 +357,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, return NULL; } readcount += dmc_offset; + fsize += dmc_offset; + if (fsize > fw->size) + goto error_truncated; /* Extract dmc_header information. */ dmc_header = (struct intel_dmc_header *)&fw->data[readcount]; @@ -380,6 +390,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, /* fw_size is in dwords, so multiplied by 4 to convert into bytes.
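 *
 * The running total is the point of this pattern: fsize counts the bytes
 * the parser is about to trust, bumped before each header or payload is
 * dereferenced, so a length field read from the blob can never steer a
 * read past fw->size. A stand-alone sketch of the same idea (names are
 * illustrative, not the i915 ones):
 *
 *	size_t fsize = sizeof(*css) + sizeof(*pkg) + sizeof(*dmc);
 *	if (fsize > fw->size)
 *		goto error_truncated;	(headers alone do not fit)
 *	fsize += dmc_offset;		(offset taken from the blob itself)
 *	if (fsize > fw->size)
 *		goto error_truncated;
 *	fsize += nbytes;		(payload length from the header)
 *	if (fsize > fw->size)
 *		goto error_truncated;	(only now is the payload copied)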
*/ nbytes = dmc_header->fw_size * 4; + fsize += nbytes; + if (fsize > fw->size) + goto error_truncated; + if (nbytes > CSR_MAX_FW_SIZE) { DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes); return NULL; } @@ -393,6 +407,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, } return memcpy(dmc_payload, &fw->data[readcount], nbytes); + +error_truncated: + DRM_ERROR("Truncated DMC firmware, rejecting.\n"); + return NULL; } static void csr_load_work_fn(struct work_struct *work) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3adb9c3b412e..cba10cdab2a9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1838,6 +1838,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915); +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915); +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 2ff2ee7f3b78..03c592753fc3 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -422,8 +422,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, else txesc2_div = 10; - I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK); - I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cb377b003321..674410682ccc 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -121,6 +121,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); + + /* + * Lower the display internal timeout. + * This is needed to avoid any hard hangs when DSI port PLL + * is off and a MMIO access is attempted by any privileged + * application, using batch buffers or any other means.
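 *
 * For concreteness: MMIO_TIMEOUT_US() added to i915_reg.h above is just
 * ((us) << 0), i.e. the count is written unshifted into the low bits of
 * the register, so the statement that follows programs a 950 us internal
 * timeout. The particular value is taken from this patch as-is -- a
 * hardware tuning number, not derived here.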
+ */ + I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -6274,19 +6282,23 @@ static void gen9_disable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RP_CONTROL, 0); } -static void gen6_disable_rps(struct drm_i915_private *dev_priv) +static void gen6_disable_rc6(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RC_CONTROL, 0); +} + +static void gen6_disable_rps(struct drm_i915_private *dev_priv) +{ I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_RP_CONTROL, 0); } -static void cherryview_disable_rps(struct drm_i915_private *dev_priv) +static void cherryview_disable_rc6(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RC_CONTROL, 0); } -static void valleyview_disable_rps(struct drm_i915_private *dev_priv) +static void valleyview_disable_rc6(struct drm_i915_private *dev_priv) { /* we're doing forcewake before Disabling RC6, * This what the BIOS expects when going into suspend */ @@ -6537,7 +6549,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); /* 3a: Enable RC6 */ - if (intel_enable_rc6() & INTEL_RC6_ENABLE) + if (!dev_priv->rps.ctx_corrupted && + intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ @@ -6586,7 +6599,8 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ - if (intel_enable_rc6() & INTEL_RC6_ENABLE) + if (!dev_priv->rps.ctx_corrupted && + intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; intel_print_rc6_info(dev_priv, rc6_mask); if (IS_BROADWELL(dev_priv)) @@ -7767,6 +7781,95 @@ static void intel_init_emon(struct drm_i915_private *dev_priv) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } +static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv) +{ + return !I915_READ(GEN8_RC6_CTX_INFO); +} + +static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915) +{ + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (i915_rc6_ctx_corrupted(i915)) { + DRM_INFO("RC6 context corrupted, disabling runtime power management\n"); + i915->rps.ctx_corrupted = true; + intel_runtime_pm_get(i915); + } +} + +static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915) +{ + if (i915->rps.ctx_corrupted) { + intel_runtime_pm_put(i915); + i915->rps.ctx_corrupted = false; + } +} + +/** + * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA + * @i915: i915 device + * + * Perform any steps needed to clean up the RC6 CTX WA before system suspend. + */ +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915) +{ + if (i915->rps.ctx_corrupted) + intel_runtime_pm_put(i915); +} + +/** + * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA + * @i915: i915 device + * + * Perform any steps needed to re-init the RC6 CTX WA after system resume. 
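 *
 * Together with i915_rc6_ctx_wa_suspend() above, this keeps the
 * runtime-pm reference balanced across a suspend cycle: _suspend drops
 * the reference that was taken when corruption was first detected, and
 * _resume either re-takes it (the corruption survived suspend) or
 * declares the context clean again. A hedged sketch of a suspend path
 * using the pair -- the actual call sites are not part of this hunk:
 *
 *	i915_rc6_ctx_wa_suspend(dev_priv);	(before powering down)
 *	...system sleeps...
 *	i915_rc6_ctx_wa_resume(dev_priv);	(after powering back up)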
+ */ +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915) +{ + if (!i915->rps.ctx_corrupted) + return; + + if (i915_rc6_ctx_corrupted(i915)) { + intel_runtime_pm_get(i915); + return; + } + + DRM_INFO("RC6 context restored, re-enabling runtime power management\n"); + i915->rps.ctx_corrupted = false; +} + +static void intel_disable_rc6(struct drm_i915_private *dev_priv); + +/** + * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption + * @i915: i915 device + * + * Check if an RC6 CTX corruption has happened since the last check and if so + * disable RC6 and runtime power management. + * + * Return false if no context corruption has happened since the last call of + * this function, true otherwise. +*/ +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915) +{ + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return false; + + if (i915->rps.ctx_corrupted) + return false; + + if (!i915_rc6_ctx_corrupted(i915)) + return false; + + DRM_NOTE("RC6 context corruption, disabling runtime power management\n"); + + intel_disable_rc6(i915); + i915->rps.ctx_corrupted = true; + intel_runtime_pm_get_noresume(i915); + + return true; +} + void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { /* @@ -7781,6 +7884,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->rps.hw_lock); + i915_rc6_ctx_wa_init(dev_priv); + /* Initialize RPS limits (for userspace) */ if (IS_CHERRYVIEW(dev_priv)) cherryview_init_gt_powersave(dev_priv); @@ -7830,6 +7935,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); + i915_rc6_ctx_wa_cleanup(dev_priv); + if (!i915.enable_rc6) intel_runtime_pm_put(dev_priv); } @@ -7861,27 +7968,47 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) gen6_reset_rps_interrupts(dev_priv); } -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) +static void __intel_disable_rc6(struct drm_i915_private *dev_priv) { - if (!READ_ONCE(dev_priv->rps.enabled)) - return; + if (INTEL_GEN(dev_priv) >= 9) + gen9_disable_rc6(dev_priv); + else if (IS_CHERRYVIEW(dev_priv)) + cherryview_disable_rc6(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_disable_rc6(dev_priv); + else if (INTEL_GEN(dev_priv) >= 6) + gen6_disable_rc6(dev_priv); +} +static void intel_disable_rc6(struct drm_i915_private *dev_priv) +{ mutex_lock(&dev_priv->rps.hw_lock); + __intel_disable_rc6(dev_priv); + mutex_unlock(&dev_priv->rps.hw_lock); +} - if (INTEL_GEN(dev_priv) >= 9) { - gen9_disable_rc6(dev_priv); +static void intel_disable_rps(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 9) gen9_disable_rps(dev_priv); - } else if (IS_CHERRYVIEW(dev_priv)) { - cherryview_disable_rps(dev_priv); - } else if (IS_VALLEYVIEW(dev_priv)) { - valleyview_disable_rps(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 6) { + else if (INTEL_GEN(dev_priv) >= 6) gen6_disable_rps(dev_priv); - } else if (IS_IRONLAKE_M(dev_priv)) { + else if (IS_IRONLAKE_M(dev_priv)) ironlake_disable_drps(dev_priv); - } +} + +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) +{ + if (!READ_ONCE(dev_priv->rps.enabled)) + return; + + mutex_lock(&dev_priv->rps.hw_lock); + + __intel_disable_rc6(dev_priv); + intel_disable_rps(dev_priv); dev_priv->rps.enabled = false; + mutex_unlock(&dev_priv->rps.hw_lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 
cdf084ef5aae..63667a5c2c87 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1358,6 +1358,7 @@ void intel_ring_unpin(struct intel_ring *ring) static struct i915_vma * intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) { + struct i915_address_space *vm = &dev_priv->ggtt.base; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -1367,10 +1368,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) if (IS_ERR(obj)) return ERR_CAST(obj); - /* mark ring buffers as read-only from GPU side by default */ - obj->gt_ro = 1; + /* + * Mark ring buffers as read-only from GPU side (so no stray overwrites) + * if supported by the platform's GGTT. + */ + if (vm->has_read_only) + i915_gem_object_set_readonly(obj); - vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) goto err; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6b2067f10824..774e3772d0ed 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -417,7 +417,9 @@ struct intel_engine_cs { struct intel_engine_hangcheck hangcheck; - bool needs_cmd_parser; +#define I915_ENGINE_USING_CMD_PARSER BIT(0) +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(3) + unsigned int flags; /* * Table of commands the command parser needs to know about @@ -444,6 +446,18 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; +static inline bool +intel_engine_using_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_USING_CMD_PARSER; +} + +static inline bool +intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER; +} + static inline unsigned int intel_engine_flag(const struct intel_engine_cs *engine) { diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 29a3b0f5bec7..6d1bd11226c6 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -928,6 +928,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } +static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, + u8 audio_state) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT, + &audio_state, 1); +} + #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { @@ -1359,11 +1366,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, else sdvox |= SDVO_PIPE_SEL(crtc->pipe); - if (crtc_state->has_audio) { - WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4); - sdvox |= SDVO_AUDIO_ENABLE; - } - if (INTEL_GEN(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || @@ -1492,8 +1494,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - if (sdvox & SDVO_AUDIO_ENABLE) - pipe_config->has_audio = true; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, + &val, 1)) { + u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT; + + if ((val & mask) == mask) + pipe_config->has_audio = true; + } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &val, 1)) { @@ -1506,6 +1513,32 @@ static void intel_sdvo_get_config(struct intel_encoder 
*encoder, pipe_config->pixel_multiplier, encoder_pixel_multiplier); } +static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) +{ + intel_sdvo_set_audio_state(intel_sdvo, 0); +} + +static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + struct drm_connector *connector = conn_state->connector; + u8 *eld = connector->eld; + + eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; + + intel_sdvo_set_audio_state(intel_sdvo, 0); + + intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, + SDVO_HBUF_TX_DISABLED, + eld, drm_eld_size(eld)); + + intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID | + SDVO_AUDIO_PRESENCE_DETECT); +} + static void intel_disable_sdvo(struct intel_encoder *encoder, struct intel_crtc_state *old_crtc_state, struct drm_connector_state *conn_state) @@ -1515,6 +1548,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); u32 temp; + if (old_crtc_state->has_audio) + intel_sdvo_disable_audio(intel_sdvo); + intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, @@ -1598,6 +1634,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder, intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_ON); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); + + if (pipe_config->has_audio) + intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); } static enum drm_mode_status @@ -2468,7 +2507,6 @@ static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; - struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; @@ -2504,9 +2542,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - /* gen3 doesn't do the hdmi bits in the SDVO register */ - if (INTEL_GEN(dev_priv) >= 4 && - intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index db0ed499268a..e9ba3b047f93 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 + #define SDVO_AUDIO_ELD_VALID (1 << 0) + #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1) + #define SDVO_AUDIO_CP_READY (1 << 2) #define SDVO_CMD_SET_HBUF_INDEX 0x93 #define SDVO_HBUF_INDEX_ELD 0 #define SDVO_HBUF_INDEX_AVI_IF 1 diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index 0fee46b91c60..b1d66bc440b5 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -100,14 +100,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, ipu_dc_disable(ipu); ipu_prg_disable(ipu); + drm_crtc_vblank_off(crtc); + spin_lock_irq(&crtc->dev->event_lock); - if 
(crtc->state->event) { + if (crtc->state->event && !crtc->state->active) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irq(&crtc->dev->event_lock); - - drm_crtc_vblank_off(crtc); } static void ipu_crtc_reset(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index a2ca90fc403c..670662128edd 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -185,6 +185,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) struct mtk_drm_private *private = drm->dev_private; struct platform_device *pdev; struct device_node *np; + struct device *dma_dev; int ret; if (!iommu_present(&platform_bus_type)) @@ -242,7 +243,29 @@ static int mtk_drm_kms_init(struct drm_device *drm) goto err_component_unbind; } - private->dma_dev = &pdev->dev; + dma_dev = &pdev->dev; + private->dma_dev = dma_dev; + + /* + * Configure the DMA segment size to make sure we get contiguous IOVA + * when importing PRIME buffers. + */ + if (!dma_dev->dma_parms) { + private->dma_parms_allocated = true; + dma_dev->dma_parms = + devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms), + GFP_KERNEL); + } + if (!dma_dev->dma_parms) { + ret = -ENOMEM; + goto err_component_unbind; + } + + ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32)); + if (ret) { + dev_err(dma_dev, "Failed to set DMA segment size\n"); + goto err_unset_dma_parms; + } /* * We don't use the drm_irq_install() helpers provided by the DRM @@ -252,13 +275,16 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->irq_enabled = true; ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) - goto err_component_unbind; + goto err_unset_dma_parms; drm_kms_helper_poll_init(drm); drm_mode_config_reset(drm); return 0; +err_unset_dma_parms: + if (private->dma_parms_allocated) + dma_dev->dma_parms = NULL; err_component_unbind: component_unbind_all(drm->dev, drm); err_config_cleanup: @@ -269,7 +295,13 @@ err_config_cleanup: static void mtk_drm_kms_deinit(struct drm_device *drm) { + struct mtk_drm_private *private = drm->dev_private; + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + + if (private->dma_parms_allocated) + private->dma_dev->dma_parms = NULL; component_unbind_all(drm->dev, drm); drm_mode_config_cleanup(drm); @@ -286,6 +318,18 @@ static const struct file_operations mtk_drm_fops = { .compat_ioctl = drm_compat_ioctl, }; +/* + * We need to override this because the device used to import the memory is + * not dev->dev, as drm_gem_prime_import() expects. 
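 *
 * This pairs with the dma_set_max_seg_size(dma_dev, DMA_BIT_MASK(32))
 * call added to mtk_drm_kms_init() above: a device whose dma_parms is
 * unset defaults to a 64 KiB DMA segment cap, which can split an
 * imported buffer into many small segments; raising the cap lets the
 * IOMMU map it as one contiguous IOVA range. The generic shape of the
 * pattern, hedged and not MTK-specific:
 *
 *	if (!dev->dma_parms)
 *		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
 *					      GFP_KERNEL);
 *	if (!dev->dma_parms)
 *		return -ENOMEM;
 *	dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));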
+ */ +struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct mtk_drm_private *private = dev->dev_private; + + return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); +} + static struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, @@ -297,7 +341,7 @@ static struct drm_driver mtk_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, + .gem_prime_import = mtk_drm_gem_prime_import, .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf, @@ -460,12 +504,15 @@ static int mtk_drm_probe(struct platform_device *pdev) comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); if (!comp) { ret = -ENOMEM; + of_node_put(node); goto err_node; } ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); - if (ret) + if (ret) { + of_node_put(node); goto err_node; + } private->ddp_comp[comp_id] = comp; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index c3378c452c0a..445dd45e65eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -56,6 +56,8 @@ struct mtk_drm_private { } commit; struct drm_atomic_state *suspend_state; + + bool dma_parms_allocated; }; extern struct platform_driver mtk_ddp_driver; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 7e5e24c2152a..c1b8caad65e6 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -631,6 +631,15 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) if (--dsi->refcount != 0) return; + /* + * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since + * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), + * which needs irq for vblank, and mtk_dsi_stop() will disable irq. + * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), + * after dsi is fully set. 
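 *
 * The resulting teardown order, sketched (mtk_dsi_poweroff() is
 * refcounted, so only the final caller actually stops the controller):
 *
 *	mtk_output_dsi_disable()
 *	    panel disable path			(vblank irq still alive)
 *	    mtk_dsi_poweroff()
 *	        mtk_dsi_stop()			(irq goes away only here,
 *						 after the CRTC disable ran)
 *	        mtk_dsi_switch_to_cmd_mode(), PHY/clock teardown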
+ */ + mtk_dsi_stop(dsi); + if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) { if (dsi->panel) { if (drm_panel_unprepare(dsi->panel)) { @@ -697,7 +706,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) } } - mtk_dsi_stop(dsi); mtk_dsi_poweroff(dsi); dsi->enabled = false; @@ -851,6 +859,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) /* Skip connector cleanup if creation was delegated to the bridge */ if (dsi->conn.dev) drm_connector_cleanup(&dsi->conn); + if (dsi->panel) + drm_panel_detach(dsi->panel); } static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 0b6011b8d632..85fa39e2be34 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -124,6 +124,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane, priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | OSD_COLOR_MATRIX_32_ARGB; break; + case DRM_FORMAT_XBGR8888: + /* For XRGB, replace the pixel's alpha by 0xFF */ + writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN, + priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | + OSD_COLOR_MATRIX_32_ABGR; + break; case DRM_FORMAT_ARGB8888: /* For ARGB, use the pixel's alpha */ writel_bits_relaxed(OSD_REPLACE_EN, 0, @@ -131,6 +138,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane, priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | OSD_COLOR_MATRIX_32_ARGB; break; + case DRM_FORMAT_ABGR8888: + /* For ARGB, use the pixel's alpha */ + writel_bits_relaxed(OSD_REPLACE_EN, 0, + priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | + OSD_COLOR_MATRIX_32_ABGR; + break; case DRM_FORMAT_RGB888: priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 | OSD_COLOR_MATRIX_24_RGB; @@ -200,7 +214,9 @@ static const struct drm_plane_funcs meson_plane_funcs = { static const uint32_t supported_drm_formats[] = { DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, DRM_FORMAT_RGB888, DRM_FORMAT_RGB565, }; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 606df7bea97b..77c45a2ebd83 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1060,7 +1060,8 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - drm_of_component_match_add(dev, matchptr, compare_of, np); + if (of_device_is_available(np)) + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); @@ -1097,16 +1098,24 @@ static int msm_pdev_probe(struct platform_device *pdev) ret = add_gpu_components(&pdev->dev, &match); if (ret) - return ret; + goto fail; /* on all devices that I am aware of, iommu's which can map * any address the cpu can see are used: */ ret = dma_set_mask_and_coherent(&pdev->dev, ~0); if (ret) - return ret; + goto fail; + + ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); + if (ret) + goto fail; + + return 0; - return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); +fail: + of_platform_depopulate(&pdev->dev); + return ret; } static int msm_pdev_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index c02a13406a81..a0c0630a0857 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -16,10 +16,21 @@ config DRM_NOUVEAU select INPUT if ACPI && X86 select THERMAL if ACPI && X86 select ACPI_VIDEO if ACPI && X86 - select 
DRM_VM help Choose this option for open-source NVIDIA support. +config NOUVEAU_LEGACY_CTX_SUPPORT + bool "Nouveau legacy context support" + depends on DRM_NOUVEAU + select DRM_VM + default y + help + There was a version of the nouveau DDX that relied on legacy + ctx ioctls not erroring out. But that was back in time a long + ways, so offer a way to disable it now. For uapi compat with + old nouveau ddx this should be on by default, but modern distros + should consider turning it off. + config NOUVEAU_PLATFORM_DRIVER bool "Nouveau (NVIDIA) SoC GPUs" depends on DRM_NOUVEAU && ARCH_TEGRA diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index bb9d578760e7..b32c0c93b71f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -251,7 +251,7 @@ nouveau_conn_reset(struct drm_connector *connector) return; if (connector->state) - __drm_atomic_helper_connector_destroy_state(connector->state); + nouveau_conn_atomic_destroy_state(connector, connector->state); __drm_atomic_helper_connector_reset(connector, &asyc->state); asyc->dither.mode = DITHERING_MODE_AUTO; asyc->dither.depth = DITHERING_DEPTH_AUTO; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 7d3c7bb0ebfa..70a8d0b0c4f1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -967,8 +967,11 @@ nouveau_driver_fops = { static struct drm_driver driver_stub = { .driver_features = - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | - DRIVER_KMS_LEGACY_CONTEXT, + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) + | DRIVER_KMS_LEGACY_CONTEXT +#endif + , .load = nouveau_drm_load, .unload = nouveau_drm_unload, diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index b0ad7fcefcf5..5ed1e9dad8da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -273,7 +273,11 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) return drm_legacy_mmap(filp, vma); +#else + return -EINVAL; +#endif return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c index 7143ea4611aa..33a9fb5ac558 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c @@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, info->min = min(info->base, info->base + info->step * info->vidmask); info->max = nvbios_rd32(bios, volt + 0x0e); + if (!info->max) + info->max = max(info->base, info->base + info->step * info->vidmask); break; case 0x50: info->min = nvbios_rd32(bios, volt + 0x0a); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index b4e7404fe660..a11637b0f6cc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) u8 *ptr = msg->buf; while (remaining) { - u8 cnt = (remaining > 16) ? 
16 : remaining; - u8 cmd; + u8 cnt, retries, cmd; if (msg->flags & I2C_M_RD) cmd = 1; @@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if (mcnt || remaining > 16) cmd |= 4; /* MOT */ - ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); - if (ret < 0) { - nvkm_i2c_aux_release(aux); - return ret; + for (retries = 0, cnt = 0; + retries < 32 && !cnt; + retries++) { + cnt = min_t(u8, remaining, 16); + ret = aux->func->xfer(aux, true, cmd, + msg->addr, ptr, &cnt); + if (ret < 0) + goto out; + } + if (!cnt) { + AUX_TRACE(aux, "no data after 32 retries"); + ret = -EIO; + goto out; } ptr += cnt; @@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) msg++; } + ret = num; +out: nvkm_i2c_aux_release(aux); - return num; + return ret; } static u32 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index ecacb22834d7..719345074711 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c @@ -185,6 +185,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend) } static int +nvkm_i2c_preinit(struct nvkm_subdev *subdev) +{ + struct nvkm_i2c *i2c = nvkm_i2c(subdev); + struct nvkm_i2c_bus *bus; + struct nvkm_i2c_pad *pad; + + /* + * We init our i2c busses as early as possible, since they may be + * needed by the vbios init scripts on some cards + */ + list_for_each_entry(pad, &i2c->pad, head) + nvkm_i2c_pad_init(pad); + list_for_each_entry(bus, &i2c->bus, head) + nvkm_i2c_bus_init(bus); + + return 0; +} + +static int nvkm_i2c_init(struct nvkm_subdev *subdev) { struct nvkm_i2c *i2c = nvkm_i2c(subdev); @@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev) static const struct nvkm_subdev_func nvkm_i2c = { .dtor = nvkm_i2c_dtor, + .preinit = nvkm_i2c_preinit, .init = nvkm_i2c_init, .fini = nvkm_i2c_fini, .intr = nvkm_i2c_intr, diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 41ebb37aaa79..844e1142a122 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1100,7 +1100,7 @@ static const struct dss_features omap34xx_dss_feats = { static const struct dss_features omap3630_dss_feats = { .model = DSS_MODEL_OMAP3, - .fck_div_max = 32, + .fck_div_max = 31, .fck_freq_max = 173000000, .dss_fck_multiplier = 1, .parent_clk_name = "dpll4_ck", diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index c3994bd58aa1..2813933b666e 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -614,9 +614,9 @@ static const struct panel_desc auo_g133han01 = { static const struct display_timing auo_g185han01_timings = { .pixelclock = { 120000000, 144000000, 175000000 }, .hactive = { 1920, 1920, 1920 }, - .hfront_porch = { 18, 60, 74 }, - .hback_porch = { 12, 44, 54 }, - .hsync_len = { 10, 24, 32 }, + .hfront_porch = { 36, 120, 148 }, + .hback_porch = { 24, 88, 108 }, + .hsync_len = { 20, 48, 64 }, .vactive = { 1080, 1080, 1080 }, .vfront_porch = { 6, 10, 40 }, .vback_porch = { 2, 5, 20 }, @@ -2410,7 +2410,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = desc->format; dsi->lanes = desc->lanes; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err) { + struct panel_simple *panel = dev_get_drvdata(&dsi->dev); + + drm_panel_remove(&panel->base); + } + + return err; } static int panel_simple_dsi_remove(struct 
mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index e18204baed21..ff59d4b762ef 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -764,7 +764,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct radeon_encoder->output_csc = val; - if (connector->encoder->crtc) { + if (connector->encoder && connector->encoder->crtc) { struct drm_crtc *crtc = connector->encoder->crtc; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 90d5b41007bf..9e5645e4cb55 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1956,6 +1956,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index f1fa8d5c9b52..7010424b2f89 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -861,7 +861,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, struct vop *vop = to_vop(crtc); adjusted_mode->clock = - clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; + DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000), + 1000); return true; } diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index d394a03632c4..c3bd80b03f16 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -20,6 +20,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> #include <drm/drm_bridge.h> #include <drm/drm_plane_helper.h> @@ -691,6 +692,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = { }; static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = ltdc_plane_atomic_check, .atomic_update = ltdc_plane_atomic_update, .atomic_disable = ltdc_plane_atomic_disable, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index b0d70f943cec..56039897607c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -189,6 +189,12 @@ static void tilcdc_fini(struct drm_device *dev) { struct tilcdc_drm_private *priv = dev->dev_private; +#ifdef CONFIG_CPU_FREQ + if (priv->freq_transition.notifier_call) + cpufreq_unregister_notifier(&priv->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +#endif + if (priv->crtc) tilcdc_crtc_shutdown(priv->crtc); @@ -204,12 +210,6 @@ static void tilcdc_fini(struct drm_device *dev) drm_mode_config_cleanup(dev); tilcdc_remove_external_device(dev); -#ifdef CONFIG_CPU_FREQ - if (priv->freq_transition.notifier_call) - cpufreq_unregister_notifier(&priv->freq_transition, - CPUFREQ_TRANSITION_NOTIFIER); -#endif - if (priv->clk) clk_put(priv->clk); @@ -282,17 +282,6 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) goto init_failed; } -#ifdef CONFIG_CPU_FREQ - priv->freq_transition.notifier_call = cpufreq_transition; - ret = cpufreq_register_notifier(&priv->freq_transition, - CPUFREQ_TRANSITION_NOTIFIER); - if (ret) { - dev_err(dev, "failed to register cpufreq notifier\n"); - 
priv->freq_transition.notifier_call = NULL; - goto init_failed; - } -#endif - if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth)) priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH; @@ -369,6 +358,17 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) } modeset_init(ddev); +#ifdef CONFIG_CPU_FREQ + priv->freq_transition.notifier_call = cpufreq_transition; + ret = cpufreq_register_notifier(&priv->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); + if (ret) { + dev_err(dev, "failed to register cpufreq notifier\n"); + priv->freq_transition.notifier_call = NULL; + goto init_failed; + } +#endif + if (priv->is_componentized) { ret = component_bind_all(dev, ddev); if (ret < 0) diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index b45ac6bc8add..b428c3da7576 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -43,10 +43,16 @@ static const struct file_operations udl_driver_fops = { .llseek = noop_llseek, }; +static void udl_driver_release(struct drm_device *dev) +{ + udl_fini(dev); + udl_modeset_cleanup(dev); + drm_dev_fini(dev); + kfree(dev); +} + static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, - .load = udl_driver_load, - .unload = udl_driver_unload, .release = udl_driver_release, /* gem hooks */ @@ -70,28 +76,56 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static struct udl_device *udl_driver_create(struct usb_interface *interface) +{ + struct usb_device *udev = interface_to_usbdev(interface); + struct udl_device *udl; + int r; + + udl = kzalloc(sizeof(*udl), GFP_KERNEL); + if (!udl) + return ERR_PTR(-ENOMEM); + + r = drm_dev_init(&udl->drm, &driver, &interface->dev); + if (r) { + kfree(udl); + return ERR_PTR(r); + } + + udl->udev = udev; + udl->drm.dev_private = udl; + + r = udl_init(udl); + if (r) { + drm_dev_fini(&udl->drm); + kfree(udl); + return ERR_PTR(r); + } + + usb_set_intfdata(interface, udl); + return udl; +} + static int udl_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { - struct usb_device *udev = interface_to_usbdev(interface); - struct drm_device *dev; int r; + struct udl_device *udl; - dev = drm_dev_alloc(&driver, &interface->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); + udl = udl_driver_create(interface); + if (IS_ERR(udl)) + return PTR_ERR(udl); - r = drm_dev_register(dev, (unsigned long)udev); + r = drm_dev_register(&udl->drm, 0); if (r) goto err_free; - usb_set_intfdata(interface, dev); - DRM_INFO("Initialized udl on minor %d\n", dev->primary->index); + DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index); return 0; err_free: - drm_dev_unref(dev); + drm_dev_unref(&udl->drm); return r; } diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 307455dd6526..d5a5dcd15dd8 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -49,8 +49,8 @@ struct urb_list { struct udl_fbdev; struct udl_device { + struct drm_device drm; struct device *dev; - struct drm_device *ddev; struct usb_device *udev; struct drm_crtc *crtc; @@ -68,6 +68,8 @@ struct udl_device { atomic_t cpu_kcycles_used; /* transpired during pixel processing */ }; +#define to_udl(x) container_of(x, struct udl_device, drm) + struct udl_gem_object { struct drm_gem_object base; struct page **pages; @@ -99,9 +101,8 @@ struct urb *udl_get_urb(struct drm_device *dev); int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len); void 
udl_urb_completion(struct urb *urb); -int udl_driver_load(struct drm_device *dev, unsigned long flags); -void udl_driver_unload(struct drm_device *dev); -void udl_driver_release(struct drm_device *dev); +int udl_init(struct udl_device *udl); +void udl_fini(struct drm_device *dev); int udl_fbdev_init(struct drm_device *dev); void udl_fbdev_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 491f1892b50e..f41fd0684ce4 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, int width, int height) { struct drm_device *dev = fb->base.dev; - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int i, ret; char *cmd; cycles_t start_cycles, end_cycles; @@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user) { struct udl_fbdev *ufbdev = info->par; struct drm_device *dev = ufbdev->ufb.base.dev; - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); /* If the USB device is gone, we don't accept new opens */ - if (drm_dev_is_unplugged(udl->ddev)) + if (drm_dev_is_unplugged(&udl->drm)) return -ENODEV; ufbdev->fb_count++; @@ -441,7 +441,7 @@ static void udl_fbdev_destroy(struct drm_device *dev, int udl_fbdev_init(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int bpp_sel = fb_bpp; struct udl_fbdev *ufbdev; int ret; @@ -480,7 +480,7 @@ free: void udl_fbdev_cleanup(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); if (!udl->fbdev) return; @@ -491,7 +491,7 @@ void udl_fbdev_cleanup(struct drm_device *dev) void udl_fbdev_unplug(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); struct udl_fbdev *ufbdev; if (!udl->fbdev) return; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 60866b422f81..124428f33e1e 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -28,7 +28,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev, struct usb_device *usbdev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); char *desc; char *buf; char *desc_end; @@ -164,7 +164,7 @@ void udl_urb_completion(struct urb *urb) static void udl_free_urb_list(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int count = udl->urbs.count; struct list_head *node; struct urb_node *unode; @@ -198,7 +198,7 @@ static void udl_free_urb_list(struct drm_device *dev) static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); struct urb *urb; struct urb_node *unode; char *buf; @@ -262,7 +262,7 @@ retry: struct urb *udl_get_urb(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int ret = 0; struct list_head *entry; struct urb_node *unode; @@ -296,7 +296,7 @@ error: int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int ret; BUG_ON(len > udl->urbs.size); @@ -311,20 +311,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) return ret; } -int udl_driver_load(struct 
drm_device *dev, unsigned long flags) +int udl_init(struct udl_device *udl) { - struct usb_device *udev = (void*)flags; - struct udl_device *udl; + struct drm_device *dev = &udl->drm; int ret = -ENOMEM; DRM_DEBUG("\n"); - udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL); - if (!udl) - return -ENOMEM; - - udl->udev = udev; - udl->ddev = dev; - dev->dev_private = udl; if (!udl_parse_vendor_descriptor(dev, udl->udev)) { ret = -ENODEV; @@ -359,7 +351,6 @@ err_fb: err: if (udl->urbs.count) udl_free_urb_list(dev); - kfree(udl); DRM_ERROR("%d\n", ret); return ret; } @@ -370,20 +361,12 @@ int udl_drop_usb(struct drm_device *dev) return 0; } -void udl_driver_unload(struct drm_device *dev) +void udl_fini(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); if (udl->urbs.count) udl_free_urb_list(dev); udl_fbdev_cleanup(dev); - kfree(udl); -} - -void udl_driver_release(struct drm_device *dev) -{ - udl_modeset_cleanup(dev); - drm_dev_fini(dev); - kfree(dev); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index ed9c443bb8a1..40cc2f6707cf 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -523,6 +523,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, ret = wait_event_timeout(vgdev->resp_wq, atomic_read(&cache_ent->is_valid), 5 * HZ); + /* is_valid check must proceed before copy of the cache entry. */ + smp_rmb(); + ptr = cache_ent->caps_cache; copy_exit: diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 26a2da1f712d..21c2de81f3e3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -585,6 +585,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev, cache_ent->id == le32_to_cpu(cmd->capset_id)) { memcpy(cache_ent->caps_cache, resp->capset_data, cache_ent->size); + /* Copy must occur before is_valid is signalled. */ + smp_wmb(); atomic_set(&cache_ent->is_valid, 1); break; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 49c28a48c5ab..dc677ba4dc38 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2495,7 +2495,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { + if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX || + cmd->body.type < SVGA3D_SHADERTYPE_MIN) { DRM_ERROR("Illegal shader type %u.\n", (unsigned) cmd->body.type); return -EINVAL; @@ -2734,6 +2735,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, if (view_type == vmw_view_max) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); + if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { + DRM_ERROR("Invalid surface id.\n"); + return -EINVAL; + } ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->sid, &srf_node); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 97000996b8dc..4394c209cb2b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -264,7 +264,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. 
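 *
 * A note on the reply = NULL assignments added after the kfree() calls
 * in this function: the final success test becomes "did the loop end
 * while still holding a reply?" instead of comparing the retry counter.
 * The old check missed the path that frees the buffer on a
 * non-checkpoint failure and then breaks out, and would then have
 * handed a freed pointer back to the caller. The general shape, as a
 * hedged sketch:
 *
 *	for (retries = 0; retries < RETRIES; retries++) {
 *		reply = recv_reply();
 *		if (!success(reply)) {
 *			kfree(reply);
 *			reply = NULL;
 *			if (checkpoint)
 *				continue;
 *			break;
 *		}
 *		break;
 *	}
 *	if (!reply)
 *		return -EINVAL;	(never report success with a freed pointer)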
*/ continue; @@ -288,7 +288,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. */ continue; @@ -300,7 +300,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, break; } - if (retries == RETRIES) + if (!reply) return -EINVAL; *msg_len = reply_len; diff --git a/drivers/gpu/imx/ipu-v3/ipu-ic.c b/drivers/gpu/imx/ipu-v3/ipu-ic.c index 321eb983c2f5..65d7daf944b0 100644 --- a/drivers/gpu/imx/ipu-v3/ipu-ic.c +++ b/drivers/gpu/imx/ipu-v3/ipu-ic.c @@ -256,7 +256,7 @@ static int init_csc(struct ipu_ic *ic, writel(param, base++); param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) | - (params->sat << 9); + (params->sat << 10); writel(param, base++); param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) | diff --git a/drivers/gpu/imx/ipu-v3/ipu-pre.c b/drivers/gpu/imx/ipu-v3/ipu-pre.c index 1d1612e28854..6fd4af647f59 100644 --- a/drivers/gpu/imx/ipu-v3/ipu-pre.c +++ b/drivers/gpu/imx/ipu-v3/ipu-pre.c @@ -102,6 +102,7 @@ struct ipu_pre { void *buffer_virt; bool in_use; unsigned int safe_window_end; + unsigned int last_bufaddr; }; static DEFINE_MUTEX(ipu_pre_list_mutex); @@ -177,6 +178,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + pre->last_bufaddr = bufaddr; val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | @@ -218,7 +220,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) unsigned short current_yblock; u32 val; + if (bufaddr == pre->last_bufaddr) + return; + writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + pre->last_bufaddr = bufaddr; do { if (time_after(jiffies, timeout)) { |