author    Dave Airlie <airlied@redhat.com>  2014-08-05 09:04:59 +1000
committer Dave Airlie <airlied@redhat.com>  2014-08-05 09:04:59 +1000
commit    5d42f82a9b8c5168d75cf59307cd271feca94464 (patch)
tree      a7049189a2c814589b3ad7e6e776fb736f980967 /drivers/gpu/drm
parent    c759606c96dc052373d4c36ea383595da46b04e9 (diff)
parent    19583ca584d6f574384e17fe7613dfaeadcdc4a6 (diff)
Merge tag 'v3.16' into drm-next
Linux 3.16 backmerge requested by i915, nouveau and radeon authors

Conflicts:
	drivers/gpu/drm/i915/i915_gem_render_state.c
	drivers/gpu/drm/i915/intel_drv.h
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 198
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 15
44 files changed, 464 insertions(+), 263 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b0b03b62d090..2e7f03ad5ee2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1469,12 +1469,13 @@ static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
- int ret;
+ int ret = 0;
DRM_INFO("Replacing VGA console driver\n");
console_lock();
- ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
if (ret == 0) {
ret = do_unregister_con_driver(&vga_con);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f4832b770522..5de27f9b8c26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -685,6 +685,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b695d184c487..21c025a209c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (base == 0)
return 0;
+ /* make sure we don't clobber the GTT if it's within stolen memory */
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ struct {
+ u32 start, end;
+ } stolen[2] = {
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ };
+ u64 gtt_start, gtt_end;
+
+ gtt_start = I915_READ(PGTBL_CTL);
+ if (IS_GEN4(dev))
+ gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ else
+ gtt_start &= PGTBL_ADDRESS_LO_MASK;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+ if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+ stolen[0].end = gtt_start;
+ if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+ stolen[1].start = gtt_end;
+
+ /* pick the larger of the two chunks */
+ if (stolen[0].end - stolen[0].start >
+ stolen[1].end - stolen[1].start) {
+ base = stolen[0].start;
+ dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ } else {
+ base = stolen[1].start;
+ dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ }
+
+ if (stolen[0].start != stolen[1].start ||
+ stolen[0].end != stolen[1].end) {
+ DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+ (unsigned long long) gtt_start,
+ (unsigned long long) gtt_end - 1);
+ DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+ base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ }
+ }
+
+
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b55b2b9075b2..6ef9d6fabf80 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3150,7 +3150,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
- u32 seqno, ctl;
+ u32 seqno;
ring->hangcheck.deadlock++;
@@ -3162,15 +3162,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
- /* cursory check for an unkickable deadlock */
- ctl = I915_READ_CTL(signaller);
- if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
- return -1;
-
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
- if (signaller->hangcheck.deadlock)
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
return -1;
return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 66dc39fb53c0..fe5c27630e95 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -995,6 +995,9 @@ enum punit_power_well {
/*
* Instruction and interrupt control regs
*/
+#define PGTBL_CTL 0x02020
+#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
+#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0f861301a94e..99eb7cad62a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12404,6 +12404,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -12472,6 +12480,15 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5336 */
{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -12712,6 +12729,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* ... */
plane = crtc->plane;
crtc->plane = !plane;
+ crtc->primary_enabled = true;
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0f05b88a75e3..ea6ff71b2910 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
+/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
+ This function only applicable when panel PM state is not to be tracked */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
+ edp_notifier);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_div;
+ u32 pp_ctrl_reg, pp_div_reg;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ if (!is_edp(intel_dp) || code != SYS_RESTART)
+ return 0;
+
+ if (IS_VALLEYVIEW(dev)) {
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+ pp_div = I915_READ(pp_div_reg);
+ pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+ /* 0x1F write to PP_DIV_REG sets max cycle delay */
+ I915_WRITE(pp_div_reg, pp_div | 0x1F);
+ I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+ msleep(intel_dp->panel_power_cycle_delay);
+ }
+
+ return 0;
+}
+
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -888,8 +921,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = min_clock; clock <= max_clock; clock++) {
+ for (clock = min_clock; clock <= max_clock; clock++) {
+ for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -3955,6 +3988,10 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (intel_dp->edp_notifier.notifier_call) {
+ unregister_reboot_notifier(&intel_dp->edp_notifier);
+ intel_dp->edp_notifier.notifier_call = NULL;
+ }
}
kfree(intel_dig_port);
}
@@ -4487,6 +4524,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
+ if (IS_VALLEYVIEW(dev)) {
+ intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+ register_reboot_notifier(&intel_dp->edp_notifier);
+ }
+
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b2837c5fccfc..8a475a6909c3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -556,6 +556,9 @@ struct intel_dp {
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
+
+ struct notifier_block edp_notifier;
+
bool use_tps3;
bool can_mst; /* this port supports mst */
bool is_mst;
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 2ee1722c0af4..bfcefbf33709 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
/* bandgap reset is needed after everytime we do power gate */
band_gap_reset(dev_priv);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
+
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
- usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
+
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
+ usleep_range(2500, 3000);
}
static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
-
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 3eeb21b9fddf..933c86305237 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
else
cmd |= DPI_LP_MODE;
- /* DPI virtual channel?! */
-
- mask = DPI_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
-
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c511287bbb86..881361c0f27e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -117,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
+
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 97bc3eab9cd6..ca52ad2ae7d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -404,6 +404,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+ /*
+ * If the acpi_video interface is not supposed to be used, don't
+ * bother processing backlight level change requests from firmware.
+ */
+ if (!acpi_video_verify_backlight_support()) {
+ DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ return 0;
+ }
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLC_BACKLIGHT_FAILED;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f2d5f2ebcdde..59b028f0b1e8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
- /* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
@@ -1244,8 +1244,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
int ret;
if (!dev_priv->vbt.backlight.present) {
- DRM_DEBUG_KMS("native backlight control not available per VBT\n");
- return 0;
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ } else {
+ DRM_DEBUG_KMS("no backlight present per VBT\n");
+ return 0;
+ }
}
/* set level and max in panel struct */
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 26e962b7e702..2283c442a10d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
}
switch ((ctrl & 0x000f0000) >> 16) {
- case 6: datarate = pclk * 30 / 8; break;
- case 5: datarate = pclk * 24 / 8; break;
+ case 6: datarate = pclk * 30; break;
+ case 5: datarate = pclk * 24; break;
case 2:
default:
- datarate = pclk * 18 / 8;
+ datarate = pclk * 18;
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 48aa38a87e3f..fa30d8196f35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
if (outp->info.type == DCB_OUTPUT_DP) {
u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
switch ((sync & 0x000003c0) >> 6) {
- case 6: pclk = pclk * 30 / 8; break;
- case 5: pclk = pclk * 24 / 8; break;
+ case 6: pclk = pclk * 30; break;
+ case 5: pclk = pclk * 24; break;
case 2:
default:
- pclk = pclk * 18 / 8;
+ pclk = pclk * 18;
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index 52c299c3d300..eb2d7789555d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
struct nvkm_output_dp *outp = (void *)base;
bool retrain = true;
u8 link[2], stat[3];
- u32 rate;
+ u32 linkrate;
int ret, i;
/* check that the link is trained at a high enough rate */
@@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
goto done;
}
- rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
- if (rate < ((datarate / 8) * 10)) {
+ linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
+ linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
+ datarate = (datarate + 9) / 10; /* -> decakilobits */
+ if (linkrate < datarate) {
DBG("link not trained at sufficient rate\n");
goto done;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index e1832778e8b6..7a1ebdfa9e1b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
struct nvkm_output_dp *outpdp = (void *)outp;
switch (data) {
case NV94_DISP_SOR_DP_PWR_STATE_OFF:
+ nouveau_event_put(outpdp->irq);
((struct nvkm_output_dp_impl *)nv_oclass(outp))
->lnk_pwr(outpdp, 0);
atomic_set(&outpdp->lt.done, 0);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index 0f57fcfe0bbf..2af9cfd2c60f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
};
}
-static inline struct ramfuc_reg
+static noinline struct ramfuc_reg
ramfuc_reg(u32 addr)
{
return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
-#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
+#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000)
#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 1ad3ea503133..c5b46e302319 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
/* (re)program mempll, if required */
if (ram->mode == 2) {
ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+ ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb44ad0..6212537b90c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
NOUVEAU_THERM_THRS_SHUTDOWN);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
/* schedule the next poll in one second */
if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
- ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+ ptimer->alarm(ptimer, 1000000000ULL, alarm);
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ddd83756b9a2..5425ffe3931d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index afe706a20f97..758c11cb9a9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -532,17 +532,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
if (state == 1)
nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 0)
+ if (state == 0) {
nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_zfill(dev, drm->fbcon);
+ }
console_unlock();
}
}
-
-void
-nouveau_fbcon_zfill_all(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- nouveau_fbcon_zfill(dev, drm->fbcon);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fdfc0c94fbcc..fcff797d2084 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
void nouveau_fbcon_fini(struct drm_device *dev);
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_zfill_all(struct drm_device *dev);
void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
void nouveau_fbcon_restore_accel(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index afdf607df3e6..4c534b7b04da 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
}
- mthd = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
+ mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
+ mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
mthd |= nv_encoder->or;
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85e9023..0bf1e20c6e44 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
pending = xchg(&qdev->ram_header->int_pending, 0);
+ if (!pending)
+ return IRQ_NONE;
+
atomic_inc(&qdev->irq_received);
if (pending & QXL_INTERRUPT_DISPLAY) {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 35f4182c63b6..b1e11f8434e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
- r = -EBUSY;
+ r = -EIO;
goto done;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..7d68203a3737 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
- u8 backlight_level;
char bl_name[16];
/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
pdata->encoder = radeon_encoder;
- backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+ /* Set a reasonable default here if the level is 0 otherwise
+ * fbdev will attempt to turn the backlight on after console
+ * unblanking and it will try and restore 0 which turns the backlight
+ * off again.
+ */
+ if (bd->props.brightness == 0)
+ bd->props.brightness = RADEON_MAX_BL_LEVEL;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 10dae4106c08..584090ac3eb9 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
tmp &= ~GLOBAL_PWRMGT_EN;
WREG32_SMC(GENERAL_PWRMGT, tmp);
- tmp = RREG32(SCLK_PWRMGT_CNTL);
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
tmp &= ~DYNAMIC_PM_EN;
WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index dcd4518a9b08..c0ea66192fe0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -7676,14 +7678,16 @@ restart_ih:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cik_vm_decode_fault(rdev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 167: /* VCE */
DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e2f605224e8c..15e4f28015e1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002,
0x913c, 0x0000000f, 0x0000000a
};
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002
};
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
static const u32 supersumo_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
static const u32 wrestler_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -5066,14 +5068,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cayman_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00c..3c69f58e46ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc04c04e..60c47f829122 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@ struct radeon_bo_va {
/* protected by vm mutex */
struct list_head vm_list;
+ struct list_head vm_status;
/* constant after initialization */
struct radeon_vm *vm;
@@ -684,10 +685,9 @@ struct radeon_flip_work {
struct work_struct unpin_work;
struct radeon_device *rdev;
int crtc_id;
- struct drm_framebuffer *fb;
+ uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct radeon_bo *new_rbo;
struct radeon_fence *fence;
};
@@ -868,6 +868,9 @@ struct radeon_vm {
struct list_head va;
unsigned id;
+ /* BOs freed, but not yet updated in the PT */
+ struct list_head freed;
+
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
@@ -876,6 +879,8 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
+ struct radeon_bo_va *ib_bo_va;
+
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -2833,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
@@ -2848,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a143461478..ae763f60c8a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
+ struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
- r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+ r = radeon_vm_clear_freed(rdev, vm);
+ if (r)
+ return r;
+
+ if (vm->ib_bo_va == NULL) {
+ DRM_ERROR("Tmp BO not in VM!\n");
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
continue;
bo = p->relocs[i].robj;
- r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842d..697add2cd4e3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
- if (radeon_vm_size < 4) {
- dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+ if (radeon_vm_size < 1) {
+ dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
- if (radeon_vm_size > 1024*1024) {
- dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+ if (radeon_vm_size > 1024) {
+ dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) {
- dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+ dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
- radeon_vm_size < (1ull << radeon_vm_block_size)) {
- dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+ (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+ dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
- rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+ rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */
r = radeon_asic_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 13896edcf0b6..bf25061c8ac4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
- radeon_fence_unref(&work->fence);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &radeon_crtc->base;
- struct drm_framebuffer *fb = work->fb;
-
- uint32_t tiling_flags, pitch_pixels;
- uint64_t base;
-
unsigned long flags;
int r;
down_read(&rdev->exclusive_lock);
- while (work->fence) {
+ if (work->fence) {
r = radeon_fence_wait(work->fence, false);
if (r == -EDEADLK) {
up_read(&rdev->exclusive_lock);
r = radeon_gpu_reset(rdev);
down_read(&rdev->exclusive_lock);
}
+ if (r)
+ DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
- if (r) {
- DRM_ERROR("failed to wait on page flip fence (%d)!\n",
- r);
- goto cleanup;
- } else
- radeon_fence_unref(&work->fence);
+ /* We continue with the page flip even if we failed to wait on
+ * the fence, otherwise the DRM core and userspace will be
+ * confused about which BO the CRTC is scanning out
+ */
+
+ radeon_fence_unref(&work->fence);
}
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ /* set the proper interrupt */
+ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+ radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_flip_work *work;
+ struct radeon_bo *new_rbo;
+ uint32_t tiling_flags, pitch_pixels;
+ uint64_t base;
+ unsigned long flags;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&work->flip_work, radeon_flip_work_func);
+ INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->event = event;
+
+ /* schedule unpin of the old buffer */
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ obj = old_radeon_fb->obj;
+
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ work->old_rbo = gem_to_radeon_bo(obj);
+
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ obj = new_radeon_fb->obj;
+ new_rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&new_rbo->tbo.bdev->fence_lock);
+ if (new_rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+ spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
/* pin the new buffer */
- DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
- work->old_rbo, work->new_rbo);
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+ work->old_rbo, new_rbo);
- r = radeon_bo_reserve(work->new_rbo, false);
+ r = radeon_bo_reserve(new_rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(new_rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
}
base &= ~7;
}
+ work->base = base;
r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
if (r) {
@@ -477,100 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* set the proper interrupt */
- radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+ if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ r = -EBUSY;
+ goto vblank_cleanup;
+ }
+ radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+ radeon_crtc->flip_work = work;
- /* do the flip (mmio) */
- radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+ /* update crtc fb */
+ crtc->primary->fb = fb;
- radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- up_read(&rdev->exclusive_lock);
- return;
+ queue_work(radeon_crtc->flip_queue, &work->flip_work);
+ return 0;
+
+vblank_cleanup:
+ drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
pflip_cleanup:
- if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+ if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
- if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+ if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
- up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
- struct drm_gem_object *obj;
- struct radeon_flip_work *work;
- unsigned long flags;
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- INIT_WORK(&work->flip_work, radeon_flip_work_func);
- INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
- work->rdev = rdev;
- work->crtc_id = radeon_crtc->crtc_id;
- work->fb = fb;
- work->event = event;
-
- /* schedule unpin of the old buffer */
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- obj = old_radeon_fb->obj;
-
- /* take a reference to the old object */
- drm_gem_object_reference(obj);
- work->old_rbo = gem_to_radeon_bo(obj);
-
- new_radeon_fb = to_radeon_framebuffer(fb);
- obj = new_radeon_fb->obj;
- work->new_rbo = gem_to_radeon_bo(obj);
-
- spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
- if (work->new_rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
- spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
- kfree(work);
- return -EBUSY;
- }
- radeon_crtc->flip_status = RADEON_FLIP_PENDING;
- radeon_crtc->flip_work = work;
-
- /* update crtc fb */
- crtc->primary->fb = fb;
-
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
- return 0;
+ return r;
}
static int
@@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
+ /* don't leak the edid if we already fetched it in detect() */
+ if (radeon_connector->edid)
+ goto got_edid;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
+got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb1421369e3a..e9e361084249 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
int radeon_vm_block_size = 9;
int radeon_deep_color = 0;
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4b..d25ae6acfd5a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- r = radeon_vm_init(rdev, &fpriv->vm);
+ vm = &fpriv->vm;
+ r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+ RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm = &fpriv->vm;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
+ if (vm->ib_bo_va)
+ radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
}
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6bbd341..725d3669014f 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
+ INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
head = &tmp->vm_list;
}
+ if (bo_va->soffset) {
+ /* add a clone of the bo_va to clear the old address */
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
+ tmp->soffset = bo_va->soffset;
+ tmp->eoffset = bo_va->eoffset;
+ tmp->vm = vm;
+ list_add(&tmp->vm_status, &vm->freed);
+ }
+
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Object have to be reserved and mutex must be locked!
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem)
{
+ struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- struct radeon_bo_va *bo_va;
unsigned nptes, ndw;
uint64_t addr;
int r;
- bo_va = radeon_vm_bo_find(vm, bo);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
- bo, vm);
+ bo_va->bo, vm);
return -EINVAL;
}
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
trace_radeon_vm_bo_update(bo_va);
- nptes = radeon_bo_ngpu_pages(bo);
+ nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
/* padding, etc. */
ndw = 64;
@@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
}
/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ list_del(&bo_va->vm_status);
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ kfree(bo_va);
+ if (r)
+ return r;
+ }
+ return 0;
+
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo to a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
*
* Object have to be reserved!
*/
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
{
- int r = 0;
+ struct radeon_vm *vm = bo_va->vm;
- mutex_lock(&bo_va->vm->mutex);
- if (bo_va->soffset)
- r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+ list_del(&bo_va->bo_list);
+ mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
- return r;
+ if (bo_va->soffset) {
+ bo_va->bo = NULL;
+ list_add(&bo_va->vm_status, &vm->freed);
+ } else {
+ kfree(bo_va);
+ }
+
+ mutex_unlock(&vm->mutex);
}
/**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int r;
vm->id = 0;
+ vm->ib_bo_va = NULL;
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va);
+ INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+ kfree(bo_va);
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index da041a43d82e..3c76e1dcdf04 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
- /* disable ss, causes hangs on some cayman boards */
- if (rdev->family == CHIP_CAYMAN) {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- }
-
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 730cee2c34cf..9e854fd016da 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6376,14 +6377,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
si_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff183df..32e50be9c4ac 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- /* There are stability issues reported on latops with
- * bapm installed when switching between AC and battery
- * power. At the same time, some desktop boards hang
- * if it's not enabled and dpm is enabled.
+ /* There are stability issues reported on with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if it's not enabled and dpm is enabled. Just enable
+ * it for MSI boards right now.
*/
- if (rdev->flags & RADEON_IS_MOBILITY)
- pi->enable_bapm = false;
- else
+ if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;