Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/Makefile               |    1
 drivers/gpu/drm/i915/i915_debugfs.c         |  301
 drivers/gpu/drm/i915/i915_dma.c             |    6
 drivers/gpu/drm/i915/i915_drv.c             |  182
 drivers/gpu/drm/i915/i915_drv.h             |  415
 drivers/gpu/drm/i915/i915_gem.c             |  412
 drivers/gpu/drm/i915/i915_gem_context.c     |  435
 drivers/gpu/drm/i915/i915_gem_evict.c       |   49
 drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  164
 drivers/gpu/drm/i915/i915_gem_gtt.c         |  675
 drivers/gpu/drm/i915/i915_gem_tiling.c      |    2
 drivers/gpu/drm/i915/i915_gpu_error.c       |  442
 drivers/gpu/drm/i915/i915_irq.c             |  259
 drivers/gpu/drm/i915/i915_params.c          |  155
 drivers/gpu/drm/i915/i915_reg.h             |  336
 drivers/gpu/drm/i915/i915_suspend.c         |   40
 drivers/gpu/drm/i915/i915_sysfs.c           |   10
 drivers/gpu/drm/i915/i915_ums.c             |    8
 drivers/gpu/drm/i915/intel_bios.c           |    4
 drivers/gpu/drm/i915/intel_crt.c            |    2
 drivers/gpu/drm/i915/intel_ddi.c            |  101
 drivers/gpu/drm/i915/intel_display.c        |  216
 drivers/gpu/drm/i915/intel_dp.c             |  365
 drivers/gpu/drm/i915/intel_drv.h            |   28
 drivers/gpu/drm/i915/intel_fbdev.c          |    4
 drivers/gpu/drm/i915/intel_hdmi.c           |    6
 drivers/gpu/drm/i915/intel_lvds.c           |    6
 drivers/gpu/drm/i915/intel_overlay.c        |    8
 drivers/gpu/drm/i915/intel_panel.c          |   17
 drivers/gpu/drm/i915/intel_pm.c             |  294
 drivers/gpu/drm/i915/intel_ringbuffer.c     |   37
 drivers/gpu/drm/i915/intel_ringbuffer.h     |    2
 drivers/gpu/drm/i915/intel_sprite.c         |   18
 drivers/gpu/drm/i915/intel_uncore.c         |    8
 34 files changed, 3332 insertions(+), 1676 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9fd44f5f3b3b..f33902ff2c22 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -14,6 +14,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_gtt.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_params.o \
i915_sysfs.o \
i915_trace_points.o \
i915_ums.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b2b46c52294c..2dc05c30b800 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -98,7 +98,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
if (obj->user_pin_count > 0)
return "P";
- else if (obj->pin_count > 0)
+ else if (i915_gem_obj_is_pinned(obj))
return "p";
else
return " ";
@@ -123,6 +123,8 @@ static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
+ int pin_count = 0;
+
seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
&obj->base,
get_pin_flag(obj),
@@ -139,8 +141,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- if (obj->pin_count)
- seq_printf(m, " (pinned x %d)", obj->pin_count);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->pin_count > 0)
+ pin_count++;
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_display)
seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -447,7 +451,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (list == PINNED_LIST && obj->pin_count == 0)
+ if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
continue;
seq_puts(m, " ");
@@ -712,8 +716,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- seq_printf(m, "Interrupts received: %d\n",
- atomic_read(&dev_priv->irq_received));
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
@@ -1733,6 +1735,17 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
return 0;
}
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+ struct i915_hw_context *ctx = ptr;
+ struct seq_file *m = data;
+ struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+
+ ppgtt->debug_dump(ppgtt, m);
+
+ return 0;
+}
+
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1762,6 +1775,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ struct drm_file *file;
int i;
if (INTEL_INFO(dev)->gen == 6)
@@ -1780,6 +1794,20 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
seq_puts(m, "aliasing PPGTT:\n");
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+
+ ppgtt->debug_dump(ppgtt, m);
+ } else
+ return;
+
+ list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_ppgtt *pvt_ppgtt;
+
+ pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
+ seq_printf(m, "proc: %s\n",
+ get_pid_task(file->pid, PIDTYPE_PID)->comm);
+ seq_puts(m, " default context:\n");
+ idr_for_each(&file_priv->context_idr, per_file_ctx, m);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
@@ -1892,6 +1920,44 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
return 0;
}
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct intel_dp *intel_dp = NULL;
+ int ret;
+ u8 crc[6];
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+
+ if (connector->base.dpms != DRM_MODE_DPMS_ON)
+ continue;
+
+ encoder = to_intel_encoder(connector->base.encoder);
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ ret = intel_dp_sink_crc(intel_dp, crc);
+ if (ret)
+ goto out;
+
+ seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+ crc[0], crc[1], crc[2],
+ crc[3], crc[4], crc[5]);
+ goto out;
+ }
+ ret = -ENODEV;
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -2756,6 +2822,174 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write
};
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+{
+ struct drm_device *dev = m->private;
+ int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int level;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /* WM1+ latency values in 0.5us units */
+ if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level],
+ latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, uint16_t wm[5])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+ uint16_t new[5] = { 0 };
+ int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+ if (ret != num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(dev);
+
+ return len;
+}
+
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -2929,7 +3163,7 @@ i915_drop_caches_set(void *data, u64 val)
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, x, &vm->inactive_list,
mm_list) {
- if (vma->obj->pin_count)
+ if (vma->pin_count)
continue;
ret = i915_vma_unbind(vma);
@@ -2989,6 +3223,7 @@ i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rp_state_cap, hw_max, hw_min;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3007,14 +3242,29 @@ i915_max_freq_set(void *data, u64 val)
*/
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- dev_priv->rps.max_delay = val;
- valleyview_set_rps(dev, val);
+
+ hw_max = valleyview_rps_max_freq(dev_priv);
+ hw_min = valleyview_rps_min_freq(dev_priv);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
- dev_priv->rps.max_delay = val;
- gen6_set_rps(dev, val);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = dev_priv->rps.hw_max;
+ hw_min = (rp_state_cap >> 16) & 0xff;
+ }
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
}
+ dev_priv->rps.max_delay = val;
+
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -3054,6 +3304,7 @@ i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rp_state_cap, hw_max, hw_min;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3072,13 +3323,29 @@ i915_min_freq_set(void *data, u64 val)
*/
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- dev_priv->rps.min_delay = val;
- valleyview_set_rps(dev, val);
+
+ hw_max = valleyview_rps_max_freq(dev_priv);
+ hw_min = valleyview_rps_min_freq(dev_priv);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
- dev_priv->rps.min_delay = val;
- gen6_set_rps(dev, val);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = dev_priv->rps.hw_max;
+ hw_min = (rp_state_cap >> 16) & 0xff;
}
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
+ }
+
+ dev_priv->rps.min_delay = val;
+
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -3248,6 +3515,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dpio", i915_dpio_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_pc8_status", i915_pc8_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
@@ -3269,6 +3537,9 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+ {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+ {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+ {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
};
void intel_display_crc_init(struct drm_device *dev)
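The new i915_pri/spr/cur_wm_latency debugfs files above expose the watermark latency tables for read and write. Note the unit handling in wm_latency_show(): as its math implies, WM0 is printed in tenths of a microsecond, while WM1+ values are scaled by 5 first (0.5us units), so a raw WM1 value of 12 prints as "6.0 usec". A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0 (both assumptions):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_pri_wm_latency";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* e.g. "WM1 12 (6.0 usec)" */
		fputs(line, stdout);
	fclose(f);

	/* wm_latency_write() expects all levels in one line: five raw
	 * values on HSW/BDW (assumed here), four elsewhere, in the
	 * hardware units described above. */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "12 4 5 6 7\n");
	return fclose(f) ? 1 : 0;
}
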
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 15a74f979b4b..258b1be20db3 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -990,7 +990,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = HAS_WT(dev);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+ value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
break;
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
@@ -1374,7 +1374,7 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_aliasing_ppgtt(dev);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
intel_display_power_put(dev, POWER_DOMAIN_VGA);
@@ -1776,8 +1776,8 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
if (!I915_NEED_GFX_HWS(dev))
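With full PPGTT in the tree, I915_PARAM_HAS_ALIASING_PPGTT now reports true whenever either the aliasing PPGTT is set up or full PPGTT is in use. A userspace sketch of querying it through the getparam ioctl (standard libdrm usage, not part of this diff):

#include <xf86drm.h>
#include <i915_drm.h>

static int has_ppgtt(int fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_ALIASING_PPGTT,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;	/* old kernel or query failed */

	return value;		/* 1 if some form of PPGTT is used */
}
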
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04f1f02c4019..2d05d7ce4c29 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,134 +38,30 @@
#include <linux/module.h>
#include <drm/drm_crtc_helper.h>
-static int i915_modeset __read_mostly = -1;
-module_param_named(modeset, i915_modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
- "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
- "1=on, -1=force vga console preference [default])");
-
-unsigned int i915_fbpercrtc __always_unused = 0;
-module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
-int i915_panel_ignore_lid __read_mostly = 1;
-module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
- "Override lid status (0=autodetect, 1=autodetect disabled [default], "
- "-1=force lid closed, -2=force lid open)");
-
-unsigned int i915_powersave __read_mostly = 1;
-module_param_named(powersave, i915_powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
- "Enable powersavings, fbc, downclocking, etc. (default: true)");
-
-int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-
-int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
-MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6. "
- "Different stages can be selected via bitmask values "
- "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
- "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
- "default: -1 (use per-chip default)");
-
-int i915_enable_fbc __read_mostly = -1;
-module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-MODULE_PARM_DESC(i915_enable_fbc,
- "Enable frame buffer compression for power savings "
- "(default: -1 (use per-chip default))");
-
-unsigned int i915_lvds_downclock __read_mostly = 0;
-module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
- "Use panel (LVDS/eDP) downclocking for power savings "
- "(default: false)");
-
-int i915_lvds_channel_mode __read_mostly;
-module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
-MODULE_PARM_DESC(lvds_channel_mode,
- "Specify LVDS channel mode "
- "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-int i915_panel_use_ssc __read_mostly = -1;
-module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
- "Use Spread Spectrum Clock with panels [LVDS/eDP] "
- "(default: auto from VBT)");
-
-int i915_vbt_sdvo_panel_type __read_mostly = -1;
-module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
- "Override/Ignore selection of SDVO panel mode in the VBT "
- "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
-static bool i915_try_reset __read_mostly = true;
-module_param_named(reset, i915_try_reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
-bool i915_enable_hangcheck __read_mostly = true;
-module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
- "Periodically check GPU activity for detecting hangs. "
- "WARNING: Disabling this can cause system wide hangs. "
- "(default: true)");
-
-int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(i915_enable_ppgtt,
- "Enable PPGTT (default: true)");
-
-int i915_enable_psr __read_mostly = 0;
-module_param_named(enable_psr, i915_enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
-
-unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
-module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
-MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support.");
-
-int i915_disable_power_well __read_mostly = 1;
-module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
-MODULE_PARM_DESC(disable_power_well,
- "Disable the power well when possible (default: true)");
-
-int i915_enable_ips __read_mostly = 1;
-module_param_named(enable_ips, i915_enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
-
-bool i915_fastboot __read_mostly = 0;
-module_param_named(fastboot, i915_fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
- "(default: false)");
-
-int i915_enable_pc8 __read_mostly = 1;
-module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
-MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
-
-int i915_pc8_timeout __read_mostly = 5000;
-module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
-MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
-bool i915_prefault_disable __read_mostly;
-module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
- "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
-
static struct drm_driver driver;
+#define GEN_DEFAULT_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+ .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
+ .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_845g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i85x_info = {
@@ -174,18 +70,21 @@ static const struct intel_device_info intel_i85x_info = {
.has_overlay = 1, .overlay_needs_physical = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i865g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -194,11 +93,13 @@ static const struct intel_device_info intel_i915gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -207,6 +108,7 @@ static const struct intel_device_info intel_i945gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i965g_info = {
@@ -214,6 +116,7 @@ static const struct intel_device_info intel_i965g_info = {
.has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i965gm_info = {
@@ -222,6 +125,7 @@ static const struct intel_device_info intel_i965gm_info = {
.has_overlay = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_g33_info = {
@@ -229,12 +133,14 @@ static const struct intel_device_info intel_g33_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_g45_info = {
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_gm45_info = {
@@ -243,18 +149,21 @@ static const struct intel_device_info intel_gm45_info = {
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_pineview_info = {
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -262,6 +171,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -270,6 +180,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -278,6 +189,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
#define GEN7_FEATURES \
@@ -290,18 +202,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_valleyview_m_info = {
@@ -312,6 +227,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -321,6 +237,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -329,6 +246,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -338,6 +256,7 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_broadwell_d_info = {
@@ -346,6 +265,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_broadwell_m_info = {
@@ -354,6 +274,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
};
/*
@@ -482,12 +403,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
/* Until we get further testing... */
if (IS_GEN8(dev)) {
- WARN_ON(!i915_preliminary_hw_support);
+ WARN_ON(!i915.preliminary_hw_support);
return false;
}
- if (i915_semaphores >= 0)
- return i915_semaphores;
+ if (i915.semaphores >= 0)
+ return i915.semaphores;
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
@@ -643,6 +564,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_init_pch_refclk(dev);
+ drm_mode_config_reset(dev);
mutex_lock(&dev->struct_mutex);
@@ -655,7 +577,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
- drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
@@ -752,7 +673,7 @@ int i915_reset(struct drm_device *dev)
bool simulated;
int ret;
- if (!i915_try_reset)
+ if (!i915.reset)
return 0;
mutex_lock(&dev->struct_mutex);
@@ -807,6 +728,17 @@ int i915_reset(struct drm_device *dev)
drm_irq_uninstall(dev);
drm_irq_install(dev);
+
+ /* rps/rc6 re-init is necessary to restore state lost after the
+ * reset and the re-install of drm irq. Skip for ironlake per
+ * previous concerns that it doesn't respond well to some forms
+ * of re-init after reset. */
+ if (INTEL_INFO(dev)->gen > 5) {
+ mutex_lock(&dev->struct_mutex);
+ intel_enable_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
@@ -820,7 +752,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+ if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
DRM_INFO("This hardware requires preliminary hardware support.\n"
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
return -ENODEV;
@@ -1051,14 +983,14 @@ static int __init i915_init(void)
* the default behavior.
*/
#if defined(CONFIG_DRM_I915_KMS)
- if (i915_modeset != 0)
+ if (i915.modeset != 0)
driver.driver_features |= DRIVER_MODESET;
#endif
- if (i915_modeset == 1)
+ if (i915.modeset == 1)
driver.driver_features |= DRIVER_MODESET;
#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && i915_modeset == -1)
+ if (vgacon_text_force() && i915.modeset == -1)
driver.driver_features &= ~DRIVER_MODESET;
#endif
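This removes the long-standing pile of individual i915_* module parameters from i915_drv.c; they now live behind the single struct i915_params i915 declared in i915_drv.h and defined in the new i915_params.c (that file itself is not shown in this section). A sketch of the pattern, reconstructed from the removed definitions above — field order and wording in the real file may differ:

#include <linux/module.h>
#include "i915_drv.h"

struct i915_params i915 __read_mostly = {
	.modeset = -1,
	.semaphores = -1,
	.enable_hangcheck = true,
	/* ... remaining fields keep their old per-variable defaults ... */
};

module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
	"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
	"1=on, -1=force vga console preference [default])");

module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
	"Periodically check GPU activity for detecting hangs. "
	"WARNING: Disabling this can cause system wide hangs. "
	"(default: true)");

The upshot is that callers reference i915.<field> — as the conversions throughout this diff show — instead of one extern per knob.
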
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index df77e20e3c3d..9d8ca2a36fde 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,7 +58,8 @@ enum pipe {
PIPE_A = 0,
PIPE_B,
PIPE_C,
- I915_MAX_PIPES
+ _PIPE_EDP,
+ I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')
@@ -66,7 +67,8 @@ enum transcoder {
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
- TRANSCODER_EDP = 0xF,
+ TRANSCODER_EDP,
+ I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')
@@ -295,53 +297,80 @@ struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
+ struct timeval time;
+
+ /* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 ccid;
u32 derrmr;
u32 forcewake;
- bool waiting[I915_NUM_RINGS];
- u32 pipestat[I915_MAX_PIPES];
- u32 tail[I915_NUM_RINGS];
- u32 head[I915_NUM_RINGS];
- u32 ctl[I915_NUM_RINGS];
- u32 ipeir[I915_NUM_RINGS];
- u32 ipehr[I915_NUM_RINGS];
- u32 instdone[I915_NUM_RINGS];
- u32 acthd[I915_NUM_RINGS];
- u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
- u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
- u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head[I915_NUM_RINGS];
- u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
- u32 bbstate[I915_NUM_RINGS];
- u32 instpm[I915_NUM_RINGS];
- u32 instps[I915_NUM_RINGS];
- u32 extra_instdone[I915_NUM_INSTDONE_REG];
- u32 seqno[I915_NUM_RINGS];
- u64 bbaddr[I915_NUM_RINGS];
- u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
- u32 faddr[I915_NUM_RINGS];
+ u32 gac_eco;
+ u32 gam_ecochk;
+ u32 gab_ctl;
+ u32 gfx_mode;
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
+ u32 pipestat[I915_MAX_PIPES];
u64 fence[I915_MAX_NUM_FENCES];
- struct timeval time;
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+
struct drm_i915_error_ring {
bool valid;
+ /* Software tracked state */
+ bool waiting;
+ int hangcheck_score;
+ enum intel_ring_hangcheck_action hangcheck_action;
+ int num_requests;
+
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head;
+ u32 cpu_ring_tail;
+
+ u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+ /* Register state */
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 instdone;
+ u32 acthd;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u32 seqno;
+ u64 bbaddr;
+ u32 fault_reg;
+ u32 faddr;
+ u32 rc_psmi; /* sleep state */
+ u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer, *ctx;
+ } *ringbuffer, *batchbuffer, *ctx, *hws_page;
+
struct drm_i915_error_request {
long jiffies;
u32 seqno;
u32 tail;
} *requests;
- int num_requests;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
@@ -358,11 +387,8 @@ struct drm_i915_error_state {
s32 ring:4;
u32 cache_level:3;
} **active_bo, **pinned_bo;
+
u32 *active_bo_count, *pinned_bo_count;
- struct intel_overlay_error_state *overlay;
- struct intel_display_error_state *display;
- int hangcheck_score[I915_NUM_RINGS];
- enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_connector;
@@ -507,6 +533,12 @@ struct intel_device_info {
u8 gen;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+ /* Register offsets for the various display pipes and transcoders */
+ int pipe_offsets[I915_MAX_TRANSCODERS];
+ int trans_offsets[I915_MAX_TRANSCODERS];
+ int dpll_offsets[I915_MAX_PIPES];
+ int dpll_md_offsets[I915_MAX_PIPES];
+ int palette_offsets[I915_MAX_PIPES];
};
#undef DEFINE_FLAG
@@ -524,6 +556,57 @@ enum i915_cache_level {
typedef uint32_t gen6_gtt_pte_t;
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head mm_list;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+ /** Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page. */
+ void (*unbind_vma)(struct i915_vma *vma);
+ /* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+ void (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+};
+
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
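With pin_count moved from the object into each VMA (see the matching removal from drm_i915_gem_object later in this header), per-address-space state is reached by walking obj->vma_list. A minimal sketch of the lookup — the helper name lookup_vma is hypothetical; the driver's real accessor for this is i915_gem_obj_to_vma():

static struct i915_vma *
lookup_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
{
	struct i915_vma *vma;

	/* Each binding of obj into some address space is one VMA. */
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;	/* not bound into vm */
}
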
@@ -605,6 +688,8 @@ struct i915_gtt {
struct i915_hw_ppgtt {
struct i915_address_space base;
+ struct kref ref;
+ struct drm_mm_node node;
unsigned num_pd_entries;
union {
struct page **pt_pages;
@@ -621,37 +706,12 @@ struct i915_hw_ppgtt {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[4];
};
- int (*enable)(struct drm_device *dev);
-};
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
-
- /** This object's place on the active/inactive lists */
- struct list_head mm_list;
-
- struct list_head vma_link; /* Link in the object's VMA list */
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
+ int (*enable)(struct i915_hw_ppgtt *ppgtt);
+ int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous);
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
struct i915_ctx_hang_stats {
@@ -676,9 +736,10 @@ struct i915_hw_context {
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_ring_buffer *ring;
+ struct intel_ring_buffer *last_ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
+ struct i915_address_space *vm;
struct list_head link;
};
@@ -831,11 +892,7 @@ struct i915_suspend_saved_registers {
u32 savePFIT_CONTROL;
u32 save_palette_a[256];
u32 save_palette_b[256];
- u32 saveDPFC_CB_BASE;
- u32 saveFBC_CFB_BASE;
- u32 saveFBC_LL_BASE;
u32 saveFBC_CONTROL;
- u32 saveFBC_CONTROL2;
u32 saveIER;
u32 saveIIR;
u32 saveIMR;
@@ -905,8 +962,6 @@ struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
- /* The below variables an all the rps hw state are protected by
- * dev->struct mutext. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
@@ -915,6 +970,9 @@ struct intel_gen6_power_mgmt {
u8 rp0_delay;
u8 hw_max;
+ bool rp_up_masked;
+ bool rp_down_masked;
+
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -1361,8 +1419,6 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
- atomic_t irq_received;
-
/* protects the irq masks */
spinlock_t irq_lock;
@@ -1627,18 +1683,6 @@ struct drm_i915_gem_object {
*/
unsigned int fence_dirty:1;
- /** How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, pin_ioctl
- * (via user_pin_count), execbuffer (objects are not allowed multiple
- * times for the same batchbuffer), and the framebuffer code. When
- * switching/pageflipping, the framebuffer code has at most two buffers
- * pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits. */
- unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
/**
* Is the object at the current location in the gtt mappable and
* fenceable? Used to avoid costly recalculations.
@@ -1751,7 +1795,7 @@ struct drm_i915_file_private {
} mm;
struct idr context_idr;
- struct i915_ctx_hang_stats hang_stats;
+ struct i915_hw_context *private_default_ctx;
atomic_t rps_wait_boost;
};
@@ -1824,7 +1868,11 @@ struct drm_i915_file_private {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
+ && !IS_BROADWELL(dev))
+#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
+#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1887,32 +1935,39 @@ struct drm_i915_file_private {
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc __always_unused;
-extern int i915_panel_ignore_lid __read_mostly;
-extern unsigned int i915_powersave __read_mostly;
-extern int i915_semaphores __read_mostly;
-extern unsigned int i915_lvds_downclock __read_mostly;
-extern int i915_lvds_channel_mode __read_mostly;
-extern int i915_panel_use_ssc __read_mostly;
-extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern int i915_enable_rc6 __read_mostly;
-extern int i915_enable_fbc __read_mostly;
-extern bool i915_enable_hangcheck __read_mostly;
-extern int i915_enable_ppgtt __read_mostly;
-extern int i915_enable_psr __read_mostly;
-extern unsigned int i915_preliminary_hw_support __read_mostly;
-extern int i915_disable_power_well __read_mostly;
-extern int i915_enable_ips __read_mostly;
-extern bool i915_fastboot __read_mostly;
-extern int i915_enable_pc8 __read_mostly;
-extern int i915_pc8_timeout __read_mostly;
-extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
+/* i915_params.c */
+struct i915_params {
+ int modeset;
+ int panel_ignore_lid;
+ unsigned int powersave;
+ int semaphores;
+ unsigned int lvds_downclock;
+ int lvds_channel_mode;
+ int panel_use_ssc;
+ int vbt_sdvo_panel_type;
+ int enable_rc6;
+ int enable_fbc;
+ bool enable_hangcheck;
+ int enable_ppgtt;
+ int enable_psr;
+ unsigned int preliminary_hw_support;
+ int disable_power_well;
+ int enable_ips;
+ bool fastboot;
+ int enable_pc8;
+ int pc8_timeout;
+ bool prefault_disable;
+ bool reset;
+ int invert_brightness;
+};
+extern struct i915_params i915 __read_mostly;
+
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -1945,6 +2000,8 @@ extern void intel_console_resume(struct work_struct *work);
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+ int new_delay);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
@@ -2014,6 +2071,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
+void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
@@ -2022,7 +2081,7 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
@@ -2186,6 +2245,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->pin_count > 0)
+ return true;
+ return false;
+}
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
@@ -2225,46 +2291,56 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
}
/* i915_gem_context.c */
+#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
- struct drm_file *file, int to_id);
+ struct drm_file *file, struct i915_hw_context *to);
+struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
- kref_get(&ctx->ref);
+ if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+ kref_get(&ctx->ref);
}
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
- kref_put(&ctx->ref, i915_gem_context_free);
+ if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+ kref_put(&ctx->ref, i915_gem_context_free);
+}
+
+static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+{
+ return c->id == DEFAULT_CONTEXT_ID;
}
-struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct drm_device *dev,
- struct drm_file *file,
- u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-/* i915_gem_gtt.c */
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj);
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
+ unsigned alignment,
+ unsigned cache_level,
+ bool mappable,
+ bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_everything(struct drm_device *dev);
+/* i915_gem_gtt.c */
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
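The context API above now hands full struct i915_hw_context pointers around: lookup goes through i915_gem_context_get(), and i915_switch_context() takes the context itself rather than an id. A sketch of the resulting calling pattern — the error-return convention of i915_gem_context_get() is assumed here, not shown in this diff:

static int run_on_context(struct intel_ring_buffer *ring,
			  struct drm_file *file, u32 ctx_id)
{
	struct i915_hw_context *ctx;

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	return i915_switch_context(ring, file, ctx);
}
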
@@ -2275,18 +2351,64 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+{
+ if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ return false;
+ if (i915.enable_ppgtt == 1 && full)
+ return false;
-/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
- struct i915_address_space *vm,
- int min_size,
- unsigned alignment,
- unsigned cache_level,
- bool mappable,
- bool nonblock);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
-int i915_gem_evict_everything(struct drm_device *dev);
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ DRM_INFO("Disabling PPGTT because VT-d is on\n");
+ return false;
+ }
+#endif
+
+ if (full)
+ return HAS_PPGTT(dev);
+ else
+ return HAS_ALIASING_PPGTT(dev);
+}
+
+static inline void ppgtt_release(struct kref *kref)
+{
+ struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &ppgtt->base;
+
+ if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+ (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+ ppgtt->base.cleanup(&ppgtt->base);
+ return;
+ }
+
+ /*
+ * Make sure vmas are unbound before we take down the drm_mm
+ *
+ * FIXME: Proper refcounting should take care of this, this shouldn't be
+ * needed at all.
+ */
+ if (!list_empty(&vm->active_list)) {
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &vm->active_list, mm_list)
+ if (WARN_ON(list_empty(&vma->vma_link) ||
+ list_is_singular(&vma->vma_link)))
+ break;
+
+ i915_gem_evict_vm(&ppgtt->base, true);
+ } else {
+ i915_gem_retire_requests(dev);
+ i915_gem_evict_vm(&ppgtt->base, false);
+ }
+
+ ppgtt->base.cleanup(&ppgtt->base);
+}
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
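Since struct i915_hw_ppgtt gained a kref, PPGTT lifetime is reference-counted, and ppgtt_release() above is only ever run as the kref release callback. A sketch of the intended usage — an illustrative fragment, not lifted from this diff:

kref_init(&ppgtt->ref);			/* on creation */
kref_get(&ppgtt->ref);			/* per context sharing the ppgtt */
kref_put(&ppgtt->ref, ppgtt_release);	/* last put evicts VMAs, then cleans up */
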
@@ -2566,4 +2688,31 @@ timespec_to_jiffies_timeout(const struct timespec *value)
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
#endif
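wait_remaining_ms_from_jiffies() turns a "B must happen at least X ms after A" requirement into a sleep for only the remaining time. A usage sketch — do_other_work()/do_event_b() and the 200 ms figure are illustrative only:

unsigned long event_a = jiffies;	/* record when event A happened */

do_other_work();			/* takes an unknown amount of time */

/* Just before event B: sleep only for whatever is left of the 200 ms. */
wait_remaining_ms_from_jiffies(event_a, 200);
do_event_b();
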
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00c836154725..a8a069f97c56 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (obj->pin_count)
+ if (i915_gem_obj_is_pinned(obj))
pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
@@ -476,7 +476,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
- if (likely(!i915_prefault_disable) && !prefaulted) {
+ if (likely(!i915.prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
@@ -651,7 +651,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
}
out_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
out:
return ret;
}
@@ -868,7 +868,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- if (likely(!i915_prefault_disable)) {
+ if (likely(!i915.prefault_disable)) {
ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
args->size);
if (ret)
@@ -1420,7 +1420,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1453,6 +1453,7 @@ out:
ret = VM_FAULT_OOM;
break;
case -ENOSPC:
+ case -EFAULT:
ret = VM_FAULT_SIGBUS;
break;
default:
@@ -1618,7 +1619,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
- ret = -EINVAL;
+ ret = -EFAULT;
goto out;
}
@@ -1972,7 +1973,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to obtain a purgeable object\n");
- return -EINVAL;
+ return -EFAULT;
}
BUG_ON(obj->pages_pin_count);
@@ -2035,13 +2036,17 @@ static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
- struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (vma && !list_empty(&vma->mm_list))
+ list_move_tail(&vma->mm_list, &vm->inactive_list);
+ }
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2237,125 +2242,47 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- if (acthd >= i915_gem_obj_offset(obj, vm) &&
- acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
- return true;
-
- return false;
-}
-
-static bool i915_head_inside_request(const u32 acthd_unmasked,
- const u32 request_start,
- const u32 request_end)
-{
- const u32 acthd = acthd_unmasked & HEAD_ADDR;
-
- if (request_start < request_end) {
- if (acthd >= request_start && acthd < request_end)
- return true;
- } else if (request_start > request_end) {
- if (acthd >= request_start || acthd < request_end)
- return true;
- }
-
- return false;
-}
-
-static struct i915_address_space *
-request_to_vm(struct drm_i915_gem_request *request)
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+ const struct i915_hw_context *ctx)
{
- struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
- struct i915_address_space *vm;
-
- vm = &dev_priv->gtt.base;
-
- return vm;
-}
+ unsigned long elapsed;
-static bool i915_request_guilty(struct drm_i915_gem_request *request,
- const u32 acthd, bool *inside)
-{
- /* There is a possibility that unmasked head address
- * pointing inside the ring, matches the batch_obj address range.
- * However this is extremely unlikely.
- */
- if (request->batch_obj) {
- if (i915_head_inside_object(acthd, request->batch_obj,
- request_to_vm(request))) {
- *inside = true;
- return true;
- }
- }
-
- if (i915_head_inside_request(acthd, request->head, request->tail)) {
- *inside = false;
- return true;
- }
-
- return false;
-}
-
-static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
-{
- const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+ elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
- if (hs->banned)
+ if (ctx->hang_stats.banned)
return true;
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
- DRM_ERROR("context hanging too fast, declaring banned!\n");
+ if (dev_priv->gpu_error.stop_rings == 0 &&
+ i915_gem_context_is_default(ctx)) {
+ DRM_ERROR("gpu hanging too fast, banning!\n");
+ } else {
+ DRM_DEBUG("context hanging too fast, banning!\n");
+ }
+
return true;
}
return false;
}
-static void i915_set_reset_status(struct intel_ring_buffer *ring,
- struct drm_i915_gem_request *request,
- u32 acthd)
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+ struct i915_hw_context *ctx,
+ const bool guilty)
{
- struct i915_ctx_hang_stats *hs = NULL;
- bool inside, guilty;
- unsigned long offset = 0;
-
- /* Innocent until proven guilty */
- guilty = false;
-
- if (request->batch_obj)
- offset = i915_gem_obj_offset(request->batch_obj,
- request_to_vm(request));
+ struct i915_ctx_hang_stats *hs;
- if (ring->hangcheck.action != HANGCHECK_WAIT &&
- i915_request_guilty(request, acthd, &inside)) {
- DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
- ring->name,
- inside ? "inside" : "flushing",
- offset,
- request->ctx ? request->ctx->id : 0,
- acthd);
+ if (WARN_ON(!ctx))
+ return;
- guilty = true;
- }
+ hs = &ctx->hang_stats;
- /* If contexts are disabled or this is the default context, use
- * file_priv->reset_state
- */
- if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
- hs = &request->ctx->hang_stats;
- else if (request->file_priv)
- hs = &request->file_priv->hang_stats;
-
- if (hs) {
- if (guilty) {
- hs->banned = i915_context_is_banned(hs);
- hs->batch_active++;
- hs->guilty_ts = get_seconds();
- } else {
- hs->batch_pending++;
- }
+ if (guilty) {
+ hs->banned = i915_context_is_banned(dev_priv, ctx);
+ hs->batch_active++;
+ hs->guilty_ts = get_seconds();
+ } else {
+ hs->batch_pending++;
}
}
@@ -2370,19 +2297,39 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
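+/* Find the oldest request on @ring that the GPU has not yet finished,
+ * i.e. the first request whose seqno has not been passed; returns NULL
+ * if the ring is idle. The reset status code below uses it to pick the
+ * request to blame for a hang. */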
+static struct drm_i915_gem_request *
+i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
{
- u32 completed_seqno = ring->get_seqno(ring, false);
- u32 acthd = intel_ring_get_active_head(ring);
struct drm_i915_gem_request *request;
+ const u32 completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
continue;
- i915_set_reset_status(ring, request, acthd);
+ return request;
}
+
+ return NULL;
+}
+
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *request;
+ bool ring_hung;
+
+ request = i915_gem_find_first_non_complete(ring);
+
+ if (request == NULL)
+ return;
+
+ ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+ i915_set_reset_status(dev_priv, request->ctx, ring_hung);
+
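+ /* Everything queued behind the hanging request was merely pending;
+ * record it against its context without assigning guilt. */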
+ list_for_each_entry_continue(request, &ring->request_list, list)
+ i915_set_reset_status(dev_priv, request->ctx, false);
}
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
@@ -2456,6 +2403,8 @@ void i915_gem_reset(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_reset(dev);
+
i915_gem_restore_fences(dev);
}
@@ -2474,6 +2423,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate,
+ * before we free the context associated with the requests.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ break;
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2495,22 +2462,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
i915_gem_free_request(request);
}
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
- */
- while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
-
- if (!i915_seqno_passed(seqno, obj->last_read_seqno))
- break;
-
- i915_gem_object_move_to_inactive(obj);
- }
-
if (unlikely(ring->trace_irq_seqno &&
i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
@@ -2753,9 +2704,6 @@ int i915_vma_unbind(struct i915_vma *vma)
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret;
- /* For now we only ever use 1 vma per object */
- WARN_ON(!list_is_singular(&obj->vma_list));
-
if (list_empty(&vma->vma_link))
return 0;
@@ -2765,7 +2713,7 @@ int i915_vma_unbind(struct i915_vma *vma)
return 0;
}
- if (obj->pin_count)
+ if (vma->pin_count)
return -EBUSY;
BUG_ON(obj->pages == NULL);
@@ -2787,12 +2735,8 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
- if (obj->has_global_gtt_mapping)
- i915_gem_gtt_unbind_object(obj);
- if (obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
- obj->has_aliasing_ppgtt_mapping = 0;
- }
+ vma->unbind_vma(vma);
+
i915_gem_gtt_finish_object(obj);
list_del(&vma->mm_list);
@@ -2829,7 +2773,7 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
if (!i915_gem_obj_ggtt_bound(obj))
return 0;
- if (obj->pin_count)
+ if (i915_gem_obj_to_ggtt(obj)->pin_count)
return -EBUSY;
BUG_ON(obj->pages == NULL);
@@ -2845,7 +2789,7 @@ int i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+ ret = i915_switch_context(ring, NULL, ring->default_context);
if (ret)
return ret;
@@ -3312,17 +3256,12 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- BUG_ON(!i915_is_ggtt(vm));
-
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
}
- /* For now we only ever use 1 vma per object */
- WARN_ON(!list_is_singular(&obj->vma_list));
-
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
@@ -3528,14 +3467,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma;
int ret;
if (obj->cache_level == cache_level)
return 0;
- if (obj->pin_count) {
+ if (i915_gem_obj_is_pinned(obj)) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}
@@ -3567,11 +3505,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
}
- if (obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, cache_level);
- if (obj->has_aliasing_ppgtt_mapping)
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, cache_level);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ vma->bind_vma(vma, cache_level, 0);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3695,7 +3630,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
- return obj->pin_count - !!obj->user_pin_count;
+ return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
}
/*
@@ -3769,7 +3704,7 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
obj->pin_display = is_pin_display(obj);
}
@@ -3899,21 +3834,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
bool map_and_fenceable,
bool nonblocking)
{
+ const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
struct i915_vma *vma;
int ret;
- if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
- return -EBUSY;
-
WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
+ if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+ return -EBUSY;
+
if ((alignment &&
vma->node.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
- WARN(obj->pin_count,
+ WARN(vma->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
@@ -3927,34 +3863,34 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (!i915_gem_obj_bound(obj, vm)) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
}
- if (!obj->has_global_gtt_mapping && map_and_fenceable)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ vma = i915_gem_obj_to_vma(obj, vm);
- obj->pin_count++;
+ vma->bind_vma(vma, obj->cache_level, flags);
+
+ i915_gem_obj_to_vma(obj, vm)->pin_count++;
obj->pin_mappable |= map_and_fenceable;
return 0;
}
void
-i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pin_count == 0);
- BUG_ON(!i915_gem_obj_bound_any(obj));
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+
+ BUG_ON(!vma);
+ BUG_ON(vma->pin_count == 0);
+ BUG_ON(!i915_gem_obj_ggtt_bound(obj));
- if (--obj->pin_count == 0)
+ if (--vma->pin_count == 0)
obj->pin_mappable = false;
}
@@ -3966,6 +3902,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
+ if (INTEL_INFO(dev)->gen >= 6)
+ return -ENODEV;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -3978,7 +3917,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
- ret = -EINVAL;
+ ret = -EFAULT;
goto out;
}
@@ -4038,7 +3977,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count--;
if (obj->user_pin_count == 0) {
obj->pin_filp = NULL;
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
}
out:
@@ -4118,7 +4057,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
- if (obj->pin_count) {
+ if (i915_gem_obj_is_pinned(obj)) {
ret = -EINVAL;
goto out;
}
@@ -4229,12 +4168,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
- obj->pin_count = 0;
- /* NB: 0 or 1 elements */
- WARN_ON(!list_empty(&obj->vma_list) &&
- !list_is_singular(&obj->vma_list));
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
- int ret = i915_vma_unbind(vma);
+ int ret;
+
+ vma->pin_count = 0;
+ ret = i915_vma_unbind(vma);
if (WARN_ON(ret == -ERESTARTSYS)) {
bool was_interruptible;
@@ -4283,41 +4221,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
return NULL;
}
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (vma == NULL)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&vma->vma_link);
- INIT_LIST_HEAD(&vma->mm_list);
- INIT_LIST_HEAD(&vma->exec_list);
- vma->vm = vm;
- vma->obj = obj;
-
- /* Keep GGTT vmas first to make debug easier */
- if (i915_is_ggtt(vm))
- list_add(&vma->vma_link, &obj->vma_list);
- else
- list_add_tail(&vma->vma_link, &obj->vma_list);
-
- return vma;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
-
- vma = i915_gem_obj_to_vma(obj, vm);
- if (!vma)
- vma = __i915_gem_vma_create(obj, vm);
-
- return vma;
-}
-
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
@@ -4508,9 +4411,15 @@ i915_gem_init_hw(struct drm_device *dev)
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev)) {
- u32 temp = I915_READ(GEN7_MSG_CTL);
- temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
- I915_WRITE(GEN7_MSG_CTL, temp);
+ if (IS_IVYBRIDGE(dev)) {
+ u32 temp = I915_READ(GEN7_MSG_CTL);
+ temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+ I915_WRITE(GEN7_MSG_CTL, temp);
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+ temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+ }
}
i915_gem_init_swizzling(dev);
@@ -4523,25 +4432,23 @@ i915_gem_init_hw(struct drm_device *dev)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
/*
- * XXX: There was some w/a described somewhere suggesting loading
- * contexts before PPGTT.
+ * XXX: Contexts should only be initialized once. However, a switch to
+ * the default context is something we'd like to do after reset or thaw
+ * (the latter may not actually be necessary for HW, but it goes better
+ * with our code). Context switching requires rings (for do_switch),
+ * yet must happen before PPGTT is enabled. So don't move this.
*/
- ret = i915_gem_context_init(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret) {
- i915_gem_cleanup_ringbuffer(dev);
- DRM_ERROR("Context initialization failed %d\n", ret);
- return ret;
- }
-
- if (dev_priv->mm.aliasing_ppgtt) {
- ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
- if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
- DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
- }
+ DRM_ERROR("Context enable failed %d\n", ret);
+ goto err_out;
}
return 0;
+
+err_out:
+ i915_gem_cleanup_ringbuffer(dev);
+ return ret;
}
int i915_gem_init(struct drm_device *dev)
@@ -4560,10 +4467,18 @@ int i915_gem_init(struct drm_device *dev)
i915_gem_init_global_gtt(dev);
+ ret = i915_gem_context_init(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
+ i915_gem_context_fini(dev);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
return ret;
}
@@ -4658,14 +4573,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
INIT_LIST_HEAD(&ring->request_list);
}
-static void i915_init_vm(struct drm_i915_private *dev_priv,
- struct i915_address_space *vm)
+void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm)
{
+ if (!i915_is_ggtt(vm))
+ drm_mm_init(&vm->mm, vm->start, vm->total);
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->global_link);
- list_add(&vm->global_link, &dev_priv->vm_list);
+ list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
void
@@ -4950,6 +4867,7 @@ i915_gem_file_idle_work_handler(struct work_struct *work)
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
+ int ret;
DRM_DEBUG_DRIVER("\n");
@@ -4965,9 +4883,11 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
INIT_DELAYED_WORK(&file_priv->mm.idle_work,
i915_gem_file_idle_work_handler);
- idr_init(&file_priv->context_idr);
+ ret = i915_gem_context_open(dev, file);
+ if (ret)
+ kfree(file_priv);
- return 0;
+ return ret;
}
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
@@ -5014,7 +4934,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
if (obj->active)
continue;
- if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+ if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
count += obj->base.size >> PAGE_SHIFT;
}
@@ -5031,7 +4951,8 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ if (!dev_priv->mm.aliasing_ppgtt ||
+ vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
BUG_ON(list_empty(&o->vma_list));
@@ -5072,7 +4993,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ if (!dev_priv->mm.aliasing_ppgtt ||
+ vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
BUG_ON(list_empty(&o->vma_list));
@@ -5127,7 +5049,7 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
return NULL;
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
- if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+ if (vma->vm != obj_to_ggtt(obj))
return NULL;
return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e08acaba5402..19fd3629795c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -93,11 +93,19 @@
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
*/
-#define CONTEXT_ALIGN (64<<10)
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
-static struct i915_hw_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct i915_hw_context *to);
+static int do_switch(struct intel_ring_buffer *ring,
+ struct i915_hw_context *to);
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+ if (IS_GEN6(dev))
+ return GEN6_CONTEXT_ALIGN;
+
+ return GEN7_CONTEXT_ALIGN;
+}
static int get_context_size(struct drm_device *dev)
{
@@ -131,14 +139,43 @@ void i915_gem_context_free(struct kref *ctx_ref)
{
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ struct i915_hw_ppgtt *ppgtt = NULL;
- list_del(&ctx->link);
+ /* We refcount even the aliasing PPGTT to keep the code symmetric */
+ if (USES_PPGTT(ctx->obj->base.dev))
+ ppgtt = ctx_to_ppgtt(ctx);
+
+ /* XXX: Free up the object before tearing down the address space, in
+ * case we're bound in the PPGTT */
drm_gem_object_unreference(&ctx->obj->base);
+
+ if (ppgtt)
+ kref_put(&ppgtt->ref, ppgtt_release);
+ list_del(&ctx->link);
kfree(ctx);
}
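+/* Allocate and initialize a fresh PPGTT for a context. On failure the
+ * half-constructed ppgtt is freed here, so callers only ever see a
+ * valid pointer or an ERR_PTR. */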
+static struct i915_hw_ppgtt *
+create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int ret;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = i915_gem_init_ppgtt(dev, ppgtt);
+ if (ret) {
+ kfree(ppgtt);
+ return ERR_PTR(ret);
+ }
+
+ return ppgtt;
+}
+
static struct i915_hw_context *
-create_hw_context(struct drm_device *dev,
+__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,18 +203,13 @@ create_hw_context(struct drm_device *dev,
goto err_out;
}
- /* The ring associated with the context object is handled by the normal
- * object tracking code. We give an initial ring value simple to pass an
- * assertion in the context switch code.
- */
- ctx->ring = &dev_priv->ring[RCS];
list_add_tail(&ctx->link, &dev_priv->context_list);
/* Default context will never have a file_priv */
if (file_priv == NULL)
return ctx;
- ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+ ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
@@ -196,67 +228,138 @@ err_out:
return ERR_PTR(ret);
}
-static inline bool is_default_context(struct i915_hw_context *ctx)
-{
- return (ctx == ctx->ring->default_context);
-}
-
/**
* The default context needs to exist per ring that uses contexts. It stores the
* context state of the GPU for applications that don't utilize HW contexts, as
* well as for the idle case.
*/
-static int create_default_context(struct drm_i915_private *dev_priv)
+static struct i915_hw_context *
+i915_gem_create_context(struct drm_device *dev,
+ struct drm_i915_file_private *file_priv,
+ bool create_vm)
{
+ const bool is_global_default_ctx = file_priv == NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
- int ret;
+ int ret = 0;
- BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- ctx = create_hw_context(dev_priv->dev, NULL);
+ ctx = __create_hw_context(dev, file_priv);
if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- /* We may need to do things with the shrinker which require us to
- * immediately switch back to the default context. This can cause a
- * problem as pinning the default context also requires GTT space which
- * may not be available. To avoid this we always pin the
- * default context.
- */
- ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
- if (ret) {
- DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
- goto err_destroy;
- }
+ return ctx;
- ret = do_switch(ctx);
- if (ret) {
- DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
- goto err_unpin;
+ if (is_global_default_ctx) {
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = i915_gem_obj_ggtt_pin(ctx->obj,
+ get_context_alignment(dev),
+ false, false);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+ goto err_destroy;
+ }
}
- dev_priv->ring[RCS].default_context = ctx;
+ if (create_vm) {
+ struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+
+ if (IS_ERR_OR_NULL(ppgtt)) {
+ DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+ PTR_ERR(ppgtt));
+ ret = PTR_ERR(ppgtt);
+ goto err_unpin;
+ } else
+ ctx->vm = &ppgtt->base;
+
+ /* This case is reserved for the global default context and
+ * should only happen once. */
+ if (is_global_default_ctx) {
+ if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
+ ret = -EEXIST;
+ goto err_unpin;
+ }
+
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+ }
+ } else if (USES_PPGTT(dev)) {
+ /* For platforms which only have aliasing PPGTT, we fake the
+ * address space and refcounting. */
+ ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
+ kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
+ } else
+ ctx->vm = &dev_priv->gtt.base;
- DRM_DEBUG_DRIVER("Default HW context loaded\n");
- return 0;
+ return ctx;
err_unpin:
- i915_gem_object_unpin(ctx->obj);
+ if (is_global_default_ctx)
+ i915_gem_object_ggtt_unpin(ctx->obj);
err_destroy:
i915_gem_context_unreference(ctx);
- return ret;
+ return ERR_PTR(ret);
+}
+
+void i915_gem_context_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ if (!HAS_HW_CONTEXTS(dev))
+ return;
+
+ /* Prevent the hardware from restoring the last context (which hung) on
+ * the next switch */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct i915_hw_context *dctx;
+ if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+ continue;
+
+ /* Do a fake switch to the default context */
+ ring = &dev_priv->ring[i];
+ dctx = ring->default_context;
+ if (WARN_ON(!dctx))
+ continue;
+
+ if (!ring->last_context)
+ continue;
+
+ if (ring->last_context == dctx)
+ continue;
+
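+ /* ring->last_context is expected to hold a pin on its object (the
+ * next real switch will unpin it), so pin the default context here
+ * and fake its retirement before installing it below. */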
+ if (i == RCS) {
+ WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+ get_context_alignment(dev),
+ false, false));
+ /* Fake a finish/inactive */
+ dctx->obj->base.write_domain = 0;
+ dctx->obj->active = 0;
+ }
+
+ i915_gem_context_unreference(ring->last_context);
+ i915_gem_context_reference(dctx);
+ ring->last_context = dctx;
+ }
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ struct intel_ring_buffer *ring;
+ int i;
if (!HAS_HW_CONTEXTS(dev))
return 0;
- /* If called from reset, or thaw... we've been here already */
- if (dev_priv->ring[RCS].default_context)
+ /* Init should only be called once per module load. Eventually the
+ * restriction on the context_disabled check can be loosened. */
+ if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
@@ -266,11 +369,23 @@ int i915_gem_context_init(struct drm_device *dev)
return -E2BIG;
}
- ret = create_default_context(dev_priv);
- if (ret) {
- DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
- ret);
- return ret;
+ dev_priv->ring[RCS].default_context =
+ i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+
+ if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
+ PTR_ERR(dev_priv->ring[RCS].default_context));
+ return PTR_ERR(dev_priv->ring[RCS].default_context);
+ }
+
+ for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
+ if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+ continue;
+
+ ring = &dev_priv->ring[i];
+
+ /* NB: RCS will hold a ref for all rings */
+ ring->default_context = dev_priv->ring[RCS].default_context;
}
DRM_DEBUG_DRIVER("HW context support initialized\n");
@@ -281,6 +396,7 @@ void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+ int i;
if (!HAS_HW_CONTEXTS(dev))
return;
@@ -300,59 +416,129 @@ void i915_gem_context_fini(struct drm_device *dev)
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->obj->active);
- i915_gem_object_unpin(dctx->obj);
+ i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
+ dev_priv->ring[RCS].last_context = NULL;
}
- i915_gem_object_unpin(dctx->obj);
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+ continue;
+
+ if (ring->last_context)
+ i915_gem_context_unreference(ring->last_context);
+
+ ring->default_context = NULL;
+ ring->last_context = NULL;
+ }
+
+ i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
- dev_priv->ring[RCS].default_context = NULL;
- dev_priv->ring[RCS].last_context = NULL;
+ dev_priv->mm.aliasing_ppgtt = NULL;
+}
+
+int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ int ret, i;
+
+ if (!HAS_HW_CONTEXTS(dev_priv->dev))
+ return 0;
+
+ /* This is the only place the aliasing PPGTT gets enabled, which means
+ * it has to happen before we bail on reset */
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ ppgtt->enable(ppgtt);
+ }
+
+ /* FIXME: We should make this work, even in reset */
+ if (i915_reset_in_progress(&dev_priv->gpu_error))
+ return 0;
+
+ BUG_ON(!dev_priv->ring[RCS].default_context);
+
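+ /* Now switch every ring onto its default context so the hardware
+ * has a valid context image to start from. */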
+ for_each_ring(ring, dev_priv, i) {
+ ret = do_switch(ring, ring->default_context);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int context_idr_cleanup(int id, void *p, void *data)
{
struct i915_hw_context *ctx = p;
- BUG_ON(id == DEFAULT_CONTEXT_ID);
+ /* Ignore the default context because close will handle it */
+ if (i915_gem_context_is_default(ctx))
+ return 0;
i915_gem_context_unreference(ctx);
return 0;
}
-struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct drm_device *dev,
- struct drm_file *file,
- u32 id)
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *ctx;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (id == DEFAULT_CONTEXT_ID)
- return &file_priv->hang_stats;
+ if (!HAS_HW_CONTEXTS(dev)) {
+ /* Cheat for hang stats */
+ file_priv->private_default_ctx =
+ kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
- if (!HAS_HW_CONTEXTS(dev))
- return ERR_PTR(-ENOENT);
+ if (file_priv->private_default_ctx == NULL)
+ return -ENOMEM;
- ctx = i915_gem_context_get(file->driver_priv, id);
- if (ctx == NULL)
- return ERR_PTR(-ENOENT);
+ file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
+ return 0;
+ }
- return &ctx->hang_stats;
+ idr_init(&file_priv->context_idr);
+
+ mutex_lock(&dev->struct_mutex);
+ file_priv->private_default_ctx =
+ i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(file_priv->private_default_ctx)) {
+ idr_destroy(&file_priv->context_idr);
+ return PTR_ERR(file_priv->private_default_ctx);
+ }
+
+ return 0;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ if (!HAS_HW_CONTEXTS(dev)) {
+ kfree(file_priv->private_default_ctx);
+ return;
+ }
+
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ i915_gem_context_unreference(file_priv->private_default_ctx);
idr_destroy(&file_priv->context_idr);
}
-static struct i915_hw_context *
+struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
- return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+ struct i915_hw_context *ctx;
+
+ if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
+ return file_priv->private_default_ctx;
+
+ ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+ if (!ctx)
+ return ERR_PTR(-ENOENT);
+
+ return ctx;
}
static inline int
@@ -390,7 +576,10 @@ mi_set_context(struct intel_ring_buffer *ring,
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
hw_flags);
- /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+ /*
+ * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+ * WaMiSetContext_Hang:snb,ivb,vlv
+ */
intel_ring_emit(ring, MI_NOOP);
if (IS_GEN7(ring->dev))
@@ -403,21 +592,31 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
-static int do_switch(struct i915_hw_context *to)
+static int do_switch(struct intel_ring_buffer *ring,
+ struct i915_hw_context *to)
{
- struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *from = ring->last_context;
+ struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
int ret, i;
- BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+ if (from != NULL && ring == &dev_priv->ring[RCS]) {
+ BUG_ON(from->obj == NULL);
+ BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+ }
- if (from == to && !to->remap_slice)
+ if (from == to && from->last_ring == ring && !to->remap_slice)
return 0;
- ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
- if (ret)
- return ret;
+ /* Trying to pin first makes error handling easier. */
+ if (ring == &dev_priv->ring[RCS]) {
+ ret = i915_gem_obj_ggtt_pin(to->obj,
+ get_context_alignment(ring->dev),
+ false, false);
+ if (ret)
+ return ret;
+ }
/*
* Pin can switch back to the default context if we end up calling into
@@ -426,6 +625,18 @@ static int do_switch(struct i915_hw_context *to)
*/
from = ring->last_context;
+ if (USES_FULL_PPGTT(ring->dev)) {
+ ret = ppgtt->switch_mm(ppgtt, ring, false);
+ if (ret)
+ goto unpin_out;
+ }
+
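+ /* Only RCS carries a hardware context image; the other rings only
+ * needed the mm switch above, so skip straight to the bookkeeping. */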
+ if (ring != &dev_priv->ring[RCS]) {
+ if (from)
+ i915_gem_context_unreference(from);
+ goto done;
+ }
+
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
@@ -435,22 +646,21 @@ static int do_switch(struct i915_hw_context *to)
* XXX: We need a real interface to do this instead of trickery.
*/
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
- if (ret) {
- i915_gem_object_unpin(to->obj);
- return ret;
- }
+ if (ret)
+ goto unpin_out;
- if (!to->obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+ if (!to->obj->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ &dev_priv->gtt.base);
+ vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ }
- if (!to->is_initialized || is_default_context(to))
+ if (!to->is_initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(ring, to, hw_flags);
- if (ret) {
- i915_gem_object_unpin(to->obj);
- return ret;
- }
+ if (ret)
+ goto unpin_out;
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
@@ -484,15 +694,23 @@ static int do_switch(struct i915_hw_context *to)
BUG_ON(from->obj->ring != ring);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_unpin(from->obj);
+ i915_gem_object_ggtt_unpin(from->obj);
i915_gem_context_unreference(from);
}
+ to->is_initialized = true;
+
+done:
i915_gem_context_reference(to);
ring->last_context = to;
- to->is_initialized = true;
+ to->last_ring = ring;
return 0;
+
+unpin_out:
+ if (ring->id == RCS)
+ i915_gem_object_ggtt_unpin(to->obj);
+ return ret;
}
/**
@@ -508,31 +726,19 @@ static int do_switch(struct i915_hw_context *to)
*/
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file,
- int to_id)
+ struct i915_hw_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct i915_hw_context *to;
-
- if (!HAS_HW_CONTEXTS(ring->dev))
- return 0;
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (ring != &dev_priv->ring[RCS])
- return 0;
-
- if (to_id == DEFAULT_CONTEXT_ID) {
- to = ring->default_context;
- } else {
- if (file == NULL)
- return -EINVAL;
+ BUG_ON(file && to == NULL);
- to = i915_gem_context_get(file->driver_priv, to_id);
- if (to == NULL)
- return -ENOENT;
- }
+ /* We have the fake context, but don't support switching. */
+ if (!HAS_HW_CONTEXTS(ring->dev))
+ return 0;
- return do_switch(to);
+ return do_switch(ring, to);
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -553,7 +759,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- ctx = create_hw_context(dev, file_priv);
+ ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -575,14 +781,17 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ if (args->ctx_id == DEFAULT_CONTEXT_ID)
+ return -ENOENT;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_get(file_priv, args->ctx_id);
- if (!ctx) {
+ if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ return PTR_ERR(ctx);
}
idr_remove(&ctx->file_priv->context_idr, ctx->id);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2ca280f9ee53..5168d6a08054 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,7 +36,7 @@
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
- if (vma->obj->pin_count)
+ if (vma->pin_count)
return false;
if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -46,6 +46,25 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
return drm_mm_scan_add_block(&vma->node);
}
+/**
+ * i915_gem_evict_something - Evict vmas to make room for binding a new one
+ * @dev: drm_device
+ * @vm: address space to evict from
+ * @size: size of the desired free space
+ * @alignment: alignment constraint of the desired free space
+ * @cache_level: cache_level for the desired space
+ * @mappable: whether the free space must be mappable
+ * @nonblocking: whether evicting active objects is allowed or not
+ *
+ * This function will try to evict vmas until free space satisfying the
+ * requirements is found. Callers must first check whether any such hole
+ * already exists before calling this function.
+ *
+ * This function is used by the object/vma binding code.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
@@ -177,19 +196,19 @@ found:
}
/**
- * i915_gem_evict_vm - Try to free up VM space
+ * i915_gem_evict_vm - Evict all idle vmas from a vm
*
- * @vm: Address space to evict from
+ * @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
- * VM eviction is about freeing up virtual address space. If one wants fine
- * grained eviction, they should see evict something for more details. In terms
- * of freeing up actual system memory, this function may not accomplish the
- * desired result. An object may be shared in multiple address space, and this
- * function will not assert those objects be freed.
+ * This function evicts all idle vmas from a vm. If all unpinned vmas
+ * should be evicted, @do_idle needs to be set to true.
*
- * Using do_idle will result in a more complete eviction because it retires, and
- * inactivates current BOs.
+ * This is used by the execbuf code as a last-ditch effort to defragment the
+ * address space.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
@@ -207,12 +226,20 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
- if (vma->obj->pin_count == 0)
+ if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
return 0;
}
+/**
+ * i915_gem_evict_everything - Try to evict all objects
+ * @dev: Device to evict objects for
+ *
+ * This function tries to evict all gem objects from all address spaces. Used
+ * by the shrinker as a last-ditch effort and for suspend, before releasing the
+ * backing storage of all unbound objects.
+ */
int
i915_gem_evict_everything(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d269ecf46e26..032def901f98 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -91,6 +91,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
struct i915_address_space *vm,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret;
@@ -125,6 +126,20 @@ eb_lookup_vmas(struct eb_vmas *eb,
i = 0;
while (!list_empty(&objects)) {
struct i915_vma *vma;
+ struct i915_address_space *bind_vm = vm;
+
+ if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
+ USES_FULL_PPGTT(vm->dev)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* With secure dispatch, the batch buffer (the last object in the
+ * list) must be bound into the GGTT VM.
+ */
+ if (((args->flags & I915_EXEC_SECURE) &&
+ (i == (args->buffer_count - 1))))
+ bind_vm = &dev_priv->gtt.base;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
@@ -138,7 +153,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
@@ -217,7 +232,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
i915_gem_object_unpin_fence(obj);
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- i915_gem_object_unpin(obj);
+ vma->pin_count--;
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
@@ -327,8 +342,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
- struct drm_i915_gem_relocation_entry *reloc,
- struct i915_address_space *vm)
+ struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@@ -352,8 +366,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
- i915_gem_gtt_bind_object(target_i915_obj,
- target_i915_obj->cache_level);
+ struct i915_vma *vma =
+ list_first_entry(&target_i915_obj->vma_list,
+ typeof(*vma), vma_link);
+ vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
}
/* Validate that the target is in a valid r/w GPU domain */
@@ -451,8 +467,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
- vma->vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
if (ret)
return ret;
@@ -481,8 +496,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
- vma->vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
if (ret)
return ret;
}
@@ -527,11 +541,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_ring_buffer *ring,
bool *need_reloc)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
- struct drm_i915_gem_object *obj = vma->obj;
+ u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+ !vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
int ret;
need_fence =
@@ -560,14 +575,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
}
}
- /* Ensure ppgtt mapping exists if needed */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
-
- obj->has_aliasing_ppgtt_mapping = 1;
- }
-
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start;
*need_reloc = true;
@@ -578,9 +585,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
}
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
- !obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ vma->bind_vma(vma, obj->cache_level, flags);
return 0;
}
@@ -891,7 +896,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (likely(!i915_prefault_disable)) {
+ if (likely(!i915.prefault_disable)) {
if (fault_in_multipages_readable(ptr, length))
return -EFAULT;
}
@@ -900,22 +905,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
-static int
+static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
- const u32 ctx_id)
+ struct intel_ring_buffer *ring, const u32 ctx_id)
{
+ struct i915_hw_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
- if (IS_ERR(hs))
- return PTR_ERR(hs);
+ if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+ return ERR_PTR(-EINVAL);
+
+ ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+ if (IS_ERR(ctx))
+ return ctx;
+ hs = &ctx->hang_stats;
if (hs->banned) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
- return -EIO;
+ return ERR_PTR(-EIO);
}
- return 0;
+ return ctx;
}
static void
@@ -939,7 +949,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
- if (obj->pin_count) /* check for potential scanout */
+ /* check for potential scanout */
+ if (i915_gem_obj_ggtt_bound(obj) &&
+ i915_gem_obj_to_ggtt(obj)->pin_count)
intel_mark_fb_busy(obj, ring);
}
@@ -989,16 +1001,17 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec,
- struct i915_address_space *vm)
+ struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
+ struct i915_hw_context *ctx;
+ struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u32 exec_start, exec_len;
+ u32 exec_start = args->batch_start_offset, exec_len;
u32 mask, flags;
int ret, mode, i;
bool need_relocs;
@@ -1020,41 +1033,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
flags |= I915_DISPATCH_PINNED;
- switch (args->flags & I915_EXEC_RING_MASK) {
- case I915_EXEC_DEFAULT:
- case I915_EXEC_RENDER:
- ring = &dev_priv->ring[RCS];
- break;
- case I915_EXEC_BSD:
- ring = &dev_priv->ring[VCS];
- if (ctx_id != DEFAULT_CONTEXT_ID) {
- DRM_DEBUG("Ring %s doesn't support contexts\n",
- ring->name);
- return -EPERM;
- }
- break;
- case I915_EXEC_BLT:
- ring = &dev_priv->ring[BCS];
- if (ctx_id != DEFAULT_CONTEXT_ID) {
- DRM_DEBUG("Ring %s doesn't support contexts\n",
- ring->name);
- return -EPERM;
- }
- break;
- case I915_EXEC_VEBOX:
- ring = &dev_priv->ring[VECS];
- if (ctx_id != DEFAULT_CONTEXT_ID) {
- DRM_DEBUG("Ring %s doesn't support contexts\n",
- ring->name);
- return -EPERM;
- }
- break;
-
- default:
+ if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
+
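+ /* The execbuf ring selector is biased by one: 0 means the default
+ * (render) ring, while values 1..I915_NUM_RINGS index the ring array
+ * directly after subtracting 1. */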
+ if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
+ ring = &dev_priv->ring[RCS];
+ else
+ ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
@@ -1136,11 +1125,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- ret = i915_gem_validate_context(dev, file, ctx_id);
- if (ret) {
+ ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+ if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
+ ret = PTR_ERR(ctx);
goto pre_mutex_err;
- }
+ }
+
+ i915_gem_context_reference(ctx);
+
+ vm = ctx->vm;
+ if (!USES_FULL_PPGTT(dev))
+ vm = &dev_priv->gtt.base;
eb = eb_create(args);
if (eb == NULL) {
@@ -1187,14 +1183,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
- if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+ if (flags & I915_DISPATCH_SECURE &&
+ !batch_obj->has_global_gtt_mapping) {
+ /* When we have multiple VMs, we'll need to make sure that we
+ * allocate space first */
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
+ BUG_ON(!vma);
+ vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+ }
+
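+ /* The batch may sit at different offsets in the GGTT and in the
+ * context's VM; compute the start address from the VM the chosen
+ * dispatch path will actually use. */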
+ if (flags & I915_DISPATCH_SECURE)
+ exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+ else
+ exec_start += i915_gem_obj_offset(batch_obj, vm);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
- ret = i915_switch_context(ring, file, ctx_id);
+ ret = i915_switch_context(ring, file, ctx);
if (ret)
goto err;
@@ -1219,8 +1226,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = i915_gem_obj_offset(batch_obj, vm) +
- args->batch_start_offset;
+
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
@@ -1249,6 +1255,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
+ /* the request owns the ref now */
+ i915_gem_context_unreference(ctx);
eb_destroy(eb);
mutex_unlock(&dev->struct_mutex);
@@ -1270,7 +1278,6 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1326,8 +1333,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
- &dev_priv->gtt.base);
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
@@ -1353,7 +1359,6 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
@@ -1384,8 +1389,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
- ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
- &dev_priv->gtt.base);
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40a2b36b276b..a4364ae1a2d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -70,6 +71,12 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+static void ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+static void ppgtt_unbind_vma(struct i915_vma *vma);
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
+
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
@@ -199,12 +206,19 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
- uint64_t val)
+ uint64_t val, bool synchronous)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
BUG_ON(entry >= 4);
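+ /* During init/reset the ring cannot be used, so poke the PDP
+ * registers directly instead of emitting LRI commands. */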
+ if (synchronous) {
+ I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
+ I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
+ return 0;
+ }
+
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
@@ -220,36 +234,23 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
return 0;
}
-static int gen8_ppgtt_enable(struct drm_device *dev)
+static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i, j, ret;
+ int i, ret;
/* bit of a hack to find the actual last used pd */
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
- for_each_ring(ring, dev_priv, j) {
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
-
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i];
- for_each_ring(ring, dev_priv, j) {
- ret = gen8_write_pdp(ring, i, addr);
- if (ret)
- goto err_out;
- }
+ ret = gen8_write_pdp(ring, i, addr, synchronous);
+ if (ret)
+ return ret;
}
- return 0;
-err_out:
- for_each_ring(ring, dev_priv, j)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
- return ret;
+ return 0;
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@ -324,6 +325,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
container_of(vm, struct i915_hw_ppgtt, base);
int i, j;
+ list_del(&vm->global_link);
drm_mm_takedown(&vm->mm);
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
@@ -386,6 +388,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
ppgtt->enable = gen8_ppgtt_enable;
+ ppgtt->switch_mm = gen8_mm_switch;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
@@ -458,6 +461,62 @@ err_out:
return ret;
}
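+/* debugfs helper: walk the gen6 page directory, flag PDEs that do not
+ * match what gen6_write_pdes() wrote, and dump any PTE group that
+ * differs from the scratch page. */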
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
+ struct i915_address_space *vm = &ppgtt->base;
+ gen6_gtt_pte_t __iomem *pd_addr;
+ gen6_gtt_pte_t scratch_pte;
+ uint32_t pd_entry;
+ int pte, pde;
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+
+ pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+
+ seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
+ ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+ for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+ u32 expected;
+ gen6_gtt_pte_t *pt_vaddr;
+ dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+ pd_entry = readl(pd_addr + pde);
+ expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
+
+ if (pd_entry != expected)
+ seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+ pde,
+ pd_entry,
+ expected);
+ seq_printf(m, "\tPDE: %x\n", pd_entry);
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+ for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+ unsigned long va =
+ (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+ (pte * PAGE_SIZE);
+ int i;
+ bool found = false;
+ for (i = 0; i < 4; i++)
+ if (pt_vaddr[pte + i] != scratch_pte)
+ found = true;
+ if (!found)
+ continue;
+
+ seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+ for (i = 0; i < 4; i++) {
+ if (pt_vaddr[pte + i] != scratch_pte)
+ seq_printf(m, " %08x", pt_vaddr[pte + i]);
+ else
+ seq_puts(m, " SCRATCH ");
+ }
+ seq_puts(m, "\n");
+ }
+ kunmap_atomic(pt_vaddr);
+ }
+}
+
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -480,61 +539,221 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
readl(pd_addr);
}
-static int gen6_ppgtt_enable(struct drm_device *dev)
+static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
+ BUG_ON(ppgtt->pd_offset & 0x3f);
+
+ return (ppgtt->pd_offset / 64) << 16;
+}
+
+static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* If we're in reset, we can assume the GPU is sufficiently idle to
+ * manually frob these bits. Ideally we could use the ring functions,
+ * except our error handling makes it quite difficult (can't use
+ * intel_ring_begin, ring->flush, or intel_ring_advance)
+ *
+ * FIXME: We should try not to special case reset
+ */
+ if (synchronous ||
+ i915_reset_in_progress(&dev_priv->gpu_error)) {
+ WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ POSTING_READ(RING_PP_DIR_BASE(ring));
+ return 0;
+ }
+
+ /* NB: TLBs must be flushed and invalidated before a switch */
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* If we're in reset, we can assume the GPU is sufficiently idle to
+ * manually frob these bits. Ideally we could use the ring functions,
+ * except our error handling makes it quite difficult (can't use
+ * intel_ring_begin, ring->flush, or intel_ring_advance)
+ *
+ * FIXME: We should try not to special case reset
+ */
+ if (synchronous ||
+ i915_reset_in_progress(&dev_priv->gpu_error)) {
+ WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ POSTING_READ(RING_PP_DIR_BASE(ring));
+ return 0;
+ }
+
+ /* NB: TLBs must be flushed and invalidated before a switch */
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ /* XXX: RCS is the only one to auto invalidate the TLBs? */
+ if (ring->id != RCS) {
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
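+/* Gen6 has no ring-based (LRI) path here: the page directory is only
+ * ever programmed synchronously via MMIO, i.e. the single switch done
+ * at PPGTT enable time. */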
+static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_ring_buffer *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!synchronous)
+ return 0;
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+
+ POSTING_READ(RING_PP_DIR_DCLV(ring));
+
+ return 0;
+}
+
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i;
+ int j, ret;
- BUG_ON(ppgtt->pd_offset & 0x3f);
+ for_each_ring(ring, dev_priv, j) {
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- gen6_write_pdes(ppgtt);
+ /* We promise to do a switch later with FULL PPGTT. If this is
+ * aliasing, this is the one and only switch we'll do. */
+ if (USES_FULL_PPGTT(dev))
+ continue;
- pd_offset = ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
+ ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ goto err_out;
+ }
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk, gab_ctl, ecobits;
+ return 0;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
- ECOBITS_PPGTT_CACHE64B);
+err_out:
+ for_each_ring(ring, dev_priv, j)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+ return ret;
+}
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ uint32_t ecochk, ecobits;
+ int i;
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- uint32_t ecochk, ecobits;
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+ ecochk = I915_READ(GAM_ECOCHK);
+ if (IS_HASWELL(dev)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ I915_WRITE(GAM_ECOCHK, ecochk);
- ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev)) {
- ecochk |= ECOCHK_PPGTT_WB_HSW;
- } else {
- ecochk |= ECOCHK_PPGTT_LLC_IVB;
- ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
- }
- I915_WRITE(GAM_ECOCHK, ecochk);
+ for_each_ring(ring, dev_priv, i) {
+ int ret;
/* GFX_MODE is per-ring on gen7+ */
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ /* We promise to do a switch later with FULL PPGTT. If this is
+ * aliasing, this is the one and only switch we'll do. */
+ if (USES_FULL_PPGTT(dev))
+ continue;
+
+ ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ return ret;
}
- for_each_ring(ring, dev_priv, i) {
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ return 0;
+}
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ uint32_t ecochk, gab_ctl, ecobits;
+ int i;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+ ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ for_each_ring(ring, dev_priv, i) {
+ int ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ return ret;
}
+
return 0;
}
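With the switch folded into the per-gen enable hooks, the caller only has to invoke ppgtt->enable once. A hypothetical caller sketch (the wrapper shape is an assumption, not in the patch):

	/* Sketch: enabling PPGTT at GEM init time via the new hook. */
	static int example_enable_ppgtt(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		if (!ppgtt)
			return 0;

		/* ->enable points at gen6/gen7/gen8_ppgtt_enable depending on
		 * the platform; each sets GFX_PPGTT_ENABLE per ring and, unless
		 * full PPGTT is in use, performs the one synchronous
		 * switch_mm() for the aliasing address space. */
		return ppgtt->enable(ppgtt);
	}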
@@ -608,7 +827,9 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
container_of(vm, struct i915_hw_ppgtt, base);
int i;
+ list_del(&vm->global_link);
drm_mm_takedown(&ppgtt->base.mm);
+ drm_mm_remove_node(&ppgtt->node);
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
@@ -626,20 +847,51 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned first_pd_entry_in_global_pt;
- int i;
- int ret = -ENOMEM;
+ bool retried = false;
+ int i, ret;
- /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
- * entries. For aliasing ppgtt support we just steal them at the end for
- * now. */
- first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
+ /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so the entry count is
+ * multiplied by the page size. We allocate at the top of the GTT
+ * to avoid fragmentation.
+ */
+ BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+alloc:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ &ppgtt->node, GEN6_PD_SIZE,
+ GEN6_PD_ALIGN, 0,
+ 0, dev_priv->gtt.base.total,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret == -ENOSPC && !retried) {
+ ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ GEN6_PD_SIZE, GEN6_PD_ALIGN,
+ I915_CACHE_NONE, false, true);
+ if (ret)
+ return ret;
+
+ retried = true;
+ goto alloc;
+ }
+
+ if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ DRM_DEBUG("Forced to use aperture for PDEs\n");
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
- ppgtt->enable = gen6_ppgtt_enable;
+ if (IS_GEN6(dev)) {
+ ppgtt->enable = gen6_ppgtt_enable;
+ ppgtt->switch_mm = gen6_mm_switch;
+ } else if (IS_HASWELL(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = hsw_mm_switch;
+ } else if (IS_GEN7(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = gen7_mm_switch;
+ } else
+ BUG();
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
@@ -648,8 +900,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
- if (!ppgtt->pt_pages)
+ if (!ppgtt->pt_pages) {
+ drm_mm_remove_node(&ppgtt->node);
return -ENOMEM;
+ }
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
@@ -678,8 +932,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
+ ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
+ DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+ ppgtt->node.size >> 20,
+ ppgtt->node.start / PAGE_SIZE);
+ ppgtt->pd_offset =
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
return 0;
@@ -696,19 +955,15 @@ err_pt_alloc:
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
+ drm_mm_remove_node(&ppgtt->node);
return ret;
}
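To make the new pd_offset arithmetic concrete, a worked example with assumed numbers (4 KiB pages, 4-byte gen6 PTEs, hypothetical node address):

	/* Worked example of the pd_offset bookkeeping; values illustrative. */
	static u32 example_pd_offset(void)
	{
		u64 node_start = 0x7fe00000;		/* PD allocated near the GTT top */
		u32 pd_offset = node_start / 4096 * 4;	/* = 0x1ff800 */

		/* What the old gen6_ppgtt_enable() programmed into PP_DIR_BASE:
		 * the offset in cachelines, shifted into the high word. For a
		 * 64-byte-aligned offset this equals pd_offset << 10. */
		u32 pp_dir_base = (pd_offset / 64) << 16;	/* = 0x7fe00000 */

		(void)pp_dir_base;
		return pd_offset;
	}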
-static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt;
- int ret;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ int ret = 0;
ppgtt->base.dev = dev;
@@ -719,45 +974,42 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
else
BUG();
- if (ret)
- kfree(ppgtt);
- else {
- dev_priv->mm.aliasing_ppgtt = ppgtt;
+ if (!ret) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
+ i915_init_vm(dev_priv, &ppgtt->base);
+ if (INTEL_INFO(dev)->gen < 8) {
+ gen6_write_pdes(ppgtt);
+ DRM_DEBUG("Adding PPGTT at offset %x\n",
+ ppgtt->pd_offset << 10);
+ }
}
return ret;
}
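With the aliasing-only allocation dropped, callers now allocate the struct themselves and hand it to i915_gem_init_ppgtt(). A hedged sketch of that caller pattern (the wrapper is an assumption, not in the patch):

	/* Sketch: creating a per-context PPGTT with the new init path. */
	static struct i915_hw_ppgtt *example_create_vm(struct drm_device *dev)
	{
		struct i915_hw_ppgtt *ppgtt;
		int ret;

		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt)
			return ERR_PTR(-ENOMEM);

		ret = i915_gem_init_ppgtt(dev, ppgtt);
		if (ret) {
			kfree(ppgtt);
			return ERR_PTR(ret);
		}

		/* i915_gem_init_ppgtt() did kref_init(); drop the reference
		 * with kref_put() when the context is destroyed. */
		return ppgtt;
	}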
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+static void
+ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ const unsigned long entry = vma->node.start >> PAGE_SHIFT;
- if (!ppgtt)
- return;
+ WARN_ON(flags);
- ppgtt->base.cleanup(&ppgtt->base);
- dev_priv->mm.aliasing_ppgtt = NULL;
+ vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
}
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
- i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- cache_level);
-}
+ const unsigned long entry = vma->node.start >> PAGE_SHIFT;
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj)
-{
- ppgtt->base.clear_range(&ppgtt->base,
- i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- true);
+ vma->vm->clear_range(vma->vm,
+ entry,
+ vma->obj->base.size >> PAGE_SHIFT,
+ true);
}
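These two functions become the PPGTT side of the new per-VMA bind/unbind function pointers installed by __i915_gem_vma_create() later in this patch. A sketch of the calling convention (the wrapper is hypothetical):

	/* Illustrative bind/unbind cycle through the new vma ops. */
	static void example_vma_cycle(struct i915_vma *vma,
				      enum i915_cache_level level)
	{
		/* Dispatches to ppgtt_bind_vma() or ggtt_bind_vma() depending
		 * on which address space the VMA belongs to; PPGTT bindings
		 * must pass flags == 0 (note the WARN_ON above). */
		vma->bind_vma(vma, level, 0);

		/* ... GPU work using the mapping ... */

		vma->unbind_vma(vma);
	}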
extern int intel_iommu_gfx_mapped;
@@ -849,6 +1101,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
i915_check_and_clear_faults(dev);
@@ -859,8 +1112,33 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
true);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+ &dev_priv->gtt.base);
+ if (!vma)
+ continue;
+
i915_gem_clflush_object(obj, obj->pin_display);
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ /* The bind_vma code tries to be smart about tracking mappings.
+ * Unfortunately, above we've just wiped out the mappings without
+ * telling our object about it, so we need to fake it.
+ */
+ obj->has_global_gtt_mapping = 0;
+ vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ return;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
+ if (i915_is_ggtt(vm)) {
+ if (dev_priv->mm.aliasing_ppgtt)
+ gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
+ continue;
+ }
+
+ gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
}
i915_gem_chipset_flush(dev);
@@ -1017,16 +1295,18 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
readl(gtt_base);
}
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level)
+
+static void i915_ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
{
+ const unsigned long entry = vma->node.start >> PAGE_SHIFT;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(st, pg_start, flags);
-
+ BUG_ON(!i915_is_ggtt(vma->vm));
+ intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+ vma->obj->has_global_gtt_mapping = 1;
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1037,33 +1317,77 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
intel_gtt_clear_range(first_entry, num_entries);
}
+static void i915_ggtt_unbind_vma(struct i915_vma *vma)
+{
+ const unsigned int first = vma->node.start >> PAGE_SHIFT;
+ const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
+
+ BUG_ON(!i915_is_ggtt(vma->vm));
+ vma->obj->has_global_gtt_mapping = 0;
+ intel_gtt_clear_range(first, size);
+}
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+static void ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj = vma->obj;
+ const unsigned long entry = vma->node.start >> PAGE_SHIFT;
- dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
- entry,
- cache_level);
+ /* If there is no aliasing PPGTT, or the caller needs a global mapping,
+ * or we have a global mapping already but the cacheability flags have
+ * changed, set the global PTEs.
+ *
+ * If there is an aliasing PPGTT it is anecdotally faster, so use that
+ * instead if none of the above hold true.
+ *
+ * NB: A global mapping should only be needed for special regions like
+ * "gtt mappable", SNB errata, or if specified via special execbuf
+ * flags. At all other times, the GPU will use the aliasing PPGTT.
+ */
+ if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
+ if (!obj->has_global_gtt_mapping ||
+ (cache_level != obj->cache_level)) {
+ vma->vm->insert_entries(vma->vm, obj->pages, entry,
+ cache_level);
+ obj->has_global_gtt_mapping = 1;
+ }
+ }
- obj->has_global_gtt_mapping = 1;
+ if (dev_priv->mm.aliasing_ppgtt &&
+ (!obj->has_aliasing_ppgtt_mapping ||
+ (cache_level != obj->cache_level))) {
+ struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+ appgtt->base.insert_entries(&appgtt->base,
+ vma->obj->pages, entry, cache_level);
+ vma->obj->has_aliasing_ppgtt_mapping = 1;
+ }
}
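ggtt_bind_vma() folds the policy from the comment block above into two independent checks. The global-PTE half, distilled into a predicate (hypothetical helper, mirrors the code exactly):

	/* Sketch: does this binding need global GTT PTEs? */
	static bool example_needs_global_ptes(struct drm_i915_private *dev_priv,
					      struct drm_i915_gem_object *obj,
					      enum i915_cache_level level,
					      u32 flags)
	{
		if (!dev_priv->mm.aliasing_ppgtt || (flags & GLOBAL_BIND))
			return !obj->has_global_gtt_mapping ||
			       level != obj->cache_level;

		return false;	/* aliasing PPGTT PTEs suffice */
	}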
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- entry,
- obj->base.size >> PAGE_SHIFT,
- true);
+ struct drm_i915_gem_object *obj = vma->obj;
+ const unsigned long entry = vma->node.start >> PAGE_SHIFT;
+
+ if (obj->has_global_gtt_mapping) {
+ vma->vm->clear_range(vma->vm, entry,
+ vma->obj->base.size >> PAGE_SHIFT,
+ true);
+ obj->has_global_gtt_mapping = 0;
+ }
- obj->has_global_gtt_mapping = 0;
+ if (obj->has_aliasing_ppgtt_mapping) {
+ struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+ appgtt->base.clear_range(&appgtt->base,
+ entry,
+ obj->base.size >> PAGE_SHIFT,
+ true);
+ obj->has_aliasing_ppgtt_mapping = 0;
+ }
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@ -1155,21 +1479,6 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
}
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
-
- return true;
-}
-
void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1178,26 +1487,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- int ret;
-
- if (INTEL_INFO(dev)->gen <= 7) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
- }
-
- i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (!ret)
- return;
-
- DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
- if (INTEL_INFO(dev)->gen < 8)
- gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
- }
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -1253,7 +1542,7 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
if (bdw_gmch_ctl > 4) {
- WARN_ON(!i915_preliminary_hw_support);
+ WARN_ON(!i915.preliminary_hw_support);
return 4<<20;
}
@@ -1438,7 +1727,6 @@ static int i915_gmch_probe(struct drm_device *dev,
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
if (unlikely(dev_priv->gtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -1493,3 +1781,62 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ INIT_LIST_HEAD(&vma->mm_list);
+ INIT_LIST_HEAD(&vma->exec_list);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ switch (INTEL_INFO(vm->dev)->gen) {
+ case 8:
+ case 7:
+ case 6:
+ if (i915_is_ggtt(vm)) {
+ vma->unbind_vma = ggtt_unbind_vma;
+ vma->bind_vma = ggtt_bind_vma;
+ } else {
+ vma->unbind_vma = ppgtt_unbind_vma;
+ vma->bind_vma = ppgtt_bind_vma;
+ }
+ break;
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ BUG_ON(!i915_is_ggtt(vm));
+ vma->unbind_vma = i915_ggtt_unbind_vma;
+ vma->bind_vma = i915_ggtt_bind_vma;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Keep GGTT vmas first to make debug easier */
+ if (i915_is_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
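Typical lookup-or-create usage when binding an object into an address space, as a sketch (the caller shape and error handling are assumptions):

	/* Illustrative: find-or-create the VMA, then bind through its ops. */
	static int example_bind(struct drm_i915_gem_object *obj,
				struct i915_address_space *vm)
	{
		struct i915_vma *vma;

		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/* GGTT VMAs are kept at the head of obj->vma_list, so debug
		 * code finds the global mapping first; only GGTT bindings may
		 * pass GLOBAL_BIND. */
		vma->bind_vma(vma, obj->cache_level,
			      i915_is_ggtt(vm) ? GLOBAL_BIND : 0);
		return 0;
	}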
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b13905348048..eb993584aa6b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (obj->pin_count || obj->framebuffer_references) {
+ if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 990cf8f43efd..000b3694f349 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -238,50 +238,61 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
- struct drm_i915_error_state *error,
- unsigned ring)
+ struct drm_i915_error_ring *ring)
{
- BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
- if (!error->ring[ring].valid)
+ if (!ring->valid)
return;
- err_printf(m, "%s command stream:\n", ring_str(ring));
- err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
- err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
- err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
- err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
- err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
- err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
- err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ err_printf(m, " HEAD: 0x%08x\n", ring->head);
+ err_printf(m, " TAIL: 0x%08x\n", ring->tail);
+ err_printf(m, " CTL: 0x%08x\n", ring->ctl);
+ err_printf(m, " HWS: 0x%08x\n", ring->hws);
+ err_printf(m, " ACTHD: 0x%08x\n", ring->acthd);
+ err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
+ err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
+ err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
if (INTEL_INFO(dev)->gen >= 4) {
- err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]);
- err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
- err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ err_printf(m, " BBADDR: 0x%08llx\n", ring->bbaddr);
+ err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
+ err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
}
- err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
- err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
+ err_printf(m, " FADDR: 0x%08x\n", ring->faddr);
if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
- err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
+ err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][0],
- error->semaphore_seqno[ring][0]);
+ ring->semaphore_mboxes[0],
+ ring->semaphore_seqno[0]);
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][1],
- error->semaphore_seqno[ring][1]);
+ ring->semaphore_mboxes[1],
+ ring->semaphore_seqno[1]);
if (HAS_VEBOX(dev)) {
err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][2],
- error->semaphore_seqno[ring][2]);
+ ring->semaphore_mboxes[2],
+ ring->semaphore_seqno[2]);
}
}
- err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
- err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
- err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
- err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+ if (USES_PPGTT(dev)) {
+ err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ int i;
+ for (i = 0; i < 4; i++)
+ err_printf(m, " PDP%d: 0x%016llx\n",
+ i, ring->vm_info.pdp[i]);
+ } else {
+ err_printf(m, " PP_DIR_BASE: 0x%08x\n",
+ ring->vm_info.pp_dir_base);
+ }
+ }
+ err_printf(m, " seqno: 0x%08x\n", ring->seqno);
+ err_printf(m, " waiting: %s\n", yesno(ring->waiting));
+ err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
+ err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
err_printf(m, " hangcheck: %s [%d]\n",
- hangcheck_action_to_str(error->hangcheck_action[ring]),
- error->hangcheck_score[ring]);
+ hangcheck_action_to_str(ring->hangcheck_action),
+ ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -333,8 +344,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->ring); i++)
- i915_ring_error_state(m, dev, error, i);
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ err_printf(m, "%s command stream:\n", ring_str(i));
+ i915_ring_error_state(m, dev, &error->ring[i]);
+ }
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -390,6 +403,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ if ((obj = error->ring[i].hws_page)) {
+ err_printf(m, "%s --- HW Status = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ offset += 16;
+ }
+ }
+
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
@@ -472,6 +501,7 @@ static void i915_error_state_free(struct kref *error_ref)
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
+ i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
kfree(error->ring[i].requests);
}
@@ -485,6 +515,7 @@ static void i915_error_state_free(struct kref *error_ref)
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
+ struct i915_address_space *vm,
const int num_pages)
{
struct drm_i915_error_object *dst;
@@ -498,7 +529,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
if (dst == NULL)
return NULL;
- reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+ reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
@@ -508,8 +539,10 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->gtt.mappable_end &&
- src->has_global_gtt_mapping) {
+ if (src->cache_level == I915_CACHE_NONE &&
+ reloc_offset < dev_priv->gtt.mappable_end &&
+ src->has_global_gtt_mapping &&
+ i915_is_ggtt(vm)) {
void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
@@ -559,8 +592,12 @@ unwind:
kfree(dst);
return NULL;
}
-#define i915_error_object_create(dev_priv, src) \
- i915_error_object_create_sized((dev_priv), (src), \
+#define i915_error_object_create(dev_priv, src, vm) \
+ i915_error_object_create_sized((dev_priv), (src), (vm), \
+ (src)->base.size>>PAGE_SHIFT)
+
+#define i915_error_ggtt_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
(src)->base.size>>PAGE_SHIFT)
static void capture_bo(struct drm_i915_error_buffer *err,
@@ -575,7 +612,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
err->pinned = 0;
- if (obj->pin_count > 0)
+ if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
if (obj->user_pin_count > 0)
err->pinned = -1;
@@ -608,7 +645,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(obj, head, global_list) {
- if (obj->pin_count == 0)
+ if (!i915_gem_obj_is_pinned(obj))
continue;
capture_bo(err++, obj);
@@ -619,6 +656,33 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
return i;
}
+/* Generate a semi-unique error code. The code is not meant to have meaning; the
+ * code's only purpose is to try to prevent false duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ uint32_t error_code = 0;
+ int i;
+
+ /* IPEHR would be an ideal way to detect errors, as it's the gross
+ * measure of "the command that hung." However, it can record some very
+ * common synchronization commands which almost always appear when the
+ * hang is strictly a client bug. Use instdone to differentiate those
+ * cases somewhat.
+ */
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (error->ring[i].hangcheck_action == HANGCHECK_HUNG)
+ return error->ring[i].ipehr ^ error->ring[i].instdone;
+
+ return error_code;
+}
+
static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_error_state *error)
{
@@ -652,6 +716,32 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
+/* This assumes all batchbuffers are executed from the PPGTT. It might have to
+ * change in the future. */
+static bool is_active_vm(struct i915_address_space *vm,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt;
+
+ if (INTEL_INFO(dev)->gen < 7)
+ return i915_is_ggtt(vm);
+
+ /* FIXME: This ignores that the global gtt vm is also on this list. */
+ ppgtt = container_of(vm, struct i915_hw_ppgtt, base);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32;
+ pdp0 |= I915_READ(GEN8_RING_PDP_LDW(ring, 0));
+ return pdp0 == ppgtt->pd_dma_addr[0];
+ } else {
+ u32 pp_db;
+ pp_db = I915_READ(RING_PP_DIR_BASE(ring));
+ return (pp_db >> 10) == ppgtt->pd_offset;
+ }
+}
+
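The gen6/7 branch of is_active_vm() leans on the PP_DIR_BASE encoding: since the PD offset is 64-byte aligned, (pd_offset / 64) << 16 equals pd_offset << 10, so shifting the register right by 10 recovers pd_offset. A quick round-trip sketch (hypothetical helper):

	/* Round-trip check of the PP_DIR_BASE encoding; assumes a 64-byte
	 * aligned pd_offset, as guaranteed by GEN6_PD_ALIGN. */
	static bool example_pp_dir_base_roundtrip(u32 pd_offset)
	{
		u32 pp_db = (pd_offset / 64) << 16;	/* value the ring is given */

		return (pp_db >> 10) == pd_offset;	/* is_active_vm()'s test */
	}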
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
@@ -659,6 +749,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct i915_address_space *vm;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
+ bool found_active = false;
u32 seqno;
if (!ring->get_seqno)
@@ -674,11 +765,16 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (obj != NULL &&
acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
- return i915_error_object_create(dev_priv, obj);
+ return i915_error_ggtt_object_create(dev_priv, obj);
}
seqno = ring->get_seqno(ring, false);
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ if (!is_active_vm(vm, ring))
+ continue;
+
+ found_active = true;
+
list_for_each_entry(vma, &vm->active_list, mm_list) {
obj = vma->obj;
if (obj->ring != ring)
@@ -693,66 +789,120 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
- return i915_error_object_create(dev_priv, obj);
+ return i915_error_object_create(dev_priv, obj, vm);
}
}
+ WARN_ON(!found_active);
return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
- struct drm_i915_error_state *error,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring,
+ struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
- error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
- error->semaphore_mboxes[ring->id][0]
+ ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
+ ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+ ering->semaphore_mboxes[0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
- error->semaphore_mboxes[ring->id][1]
+ ering->semaphore_mboxes[1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
- error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
- error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+ ering->semaphore_seqno[0] = ring->sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->sync_seqno[1];
}
if (HAS_VEBOX(dev)) {
- error->semaphore_mboxes[ring->id][2] =
+ ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
- error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
+ ering->semaphore_seqno[2] = ring->sync_seqno[2];
}
if (INTEL_INFO(dev)->gen >= 4) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
- error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
- error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
- error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
- error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+ ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
+ ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+ ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
+ ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
+ ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
if (INTEL_INFO(dev)->gen >= 8)
- error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
- error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
+ ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+ ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
- error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
- error->ipeir[ring->id] = I915_READ(IPEIR);
- error->ipehr[ring->id] = I915_READ(IPEHR);
- error->instdone[ring->id] = I915_READ(INSTDONE);
+ ering->faddr = I915_READ(DMA_FADD_I8XX);
+ ering->ipeir = I915_READ(IPEIR);
+ ering->ipehr = I915_READ(IPEHR);
+ ering->instdone = I915_READ(INSTDONE);
}
- error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
- error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring, false);
- error->acthd[ring->id] = intel_ring_get_active_head(ring);
- error->head[ring->id] = I915_READ_HEAD(ring);
- error->tail[ring->id] = I915_READ_TAIL(ring);
- error->ctl[ring->id] = I915_READ_CTL(ring);
+ ering->waiting = waitqueue_active(&ring->irq_queue);
+ ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
+ ering->seqno = ring->get_seqno(ring, false);
+ ering->acthd = intel_ring_get_active_head(ring);
+ ering->head = I915_READ_HEAD(ring);
+ ering->tail = I915_READ_TAIL(ring);
+ ering->ctl = I915_READ_CTL(ring);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ int mmio;
+
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ default:
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ /* XXX: gen8 returns to sanity */
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ ering->hws = I915_READ(mmio);
+ }
+
+ ering->cpu_ring_head = ring->head;
+ ering->cpu_ring_tail = ring->tail;
+
+ ering->hangcheck_score = ring->hangcheck.score;
+ ering->hangcheck_action = ring->hangcheck.action;
+
+ if (USES_PPGTT(dev)) {
+ int i;
- error->cpu_ring_head[ring->id] = ring->head;
- error->cpu_ring_tail[ring->id] = ring->tail;
+ ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
- error->hangcheck_score[ring->id] = ring->hangcheck.score;
- error->hangcheck_action[ring->id] = ring->hangcheck.action;
+ switch (INTEL_INFO(dev)->gen) {
+ case 8:
+ for (i = 0; i < 4; i++) {
+ ering->vm_info.pdp[i] =
+ I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ ering->vm_info.pdp[i] <<= 32;
+ ering->vm_info.pdp[i] |=
+ I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ }
+ break;
+ case 7:
+ ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE(ring));
+ break;
+ case 6:
+ ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE_READ(ring));
+ break;
+ }
+ }
}
@@ -770,7 +920,9 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_object_create_sized(dev_priv,
- obj, 1);
+ obj,
+ &dev_priv->gtt.base,
+ 1);
break;
}
}
@@ -791,14 +943,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, ring);
+ i915_record_ring_state(dev, ring, &error->ring[i]);
error->ring[i].batchbuffer =
i915_error_first_batchbuffer(dev_priv, ring);
error->ring[i].ringbuffer =
- i915_error_object_create(dev_priv, ring->obj);
+ i915_error_ggtt_object_create(dev_priv, ring->obj);
+ if (ring->status_page.obj)
+ error->ring[i].hws_page =
+ i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
i915_gem_record_active_context(ring, error, &error->ring[i]);
@@ -845,7 +1000,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
i++;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (obj->pin_count)
+ if (i915_gem_obj_is_pinned(obj))
i++;
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
@@ -879,11 +1034,6 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
cnt++;
- if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
- cnt = 1;
-
- vm = &dev_priv->gtt.base;
-
error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
@@ -895,6 +1045,74 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
i915_gem_capture_vm(dev_priv, error, vm, i++);
}
+/* Capture all registers which don't fit into another category. */
+static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int pipe;
+
+ /* General organization:
+ * 1. Registers specific to a single generation
+ * 2. Registers which belong to multiple generations
+ * 3. Feature specific registers
+ * 4. Everything else
+ * Please try to follow the order.
+ */
+
+ /* 1: Registers specific to a single generation */
+ if (IS_VALLEYVIEW(dev)) {
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ }
+
+ if (IS_GEN7(dev))
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ if (IS_GEN6(dev)) {
+ error->forcewake = I915_READ(FORCEWAKE);
+ error->gab_ctl = I915_READ(GAB_CTL);
+ error->gfx_mode = I915_READ(GFX_MODE);
+ }
+
+ if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+
+ /* 2: Registers which belong to multiple generations */
+ if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->derrmr = I915_READ(DERRMR);
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ /* 3: Feature specific registers */
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ error->gam_ecochk = I915_READ(GAM_ECOCHK);
+ error->gac_eco = I915_READ(GAC_ECO_BITS);
+ }
+
+ /* 4: Everything else */
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else {
+ error->ier = I915_READ(IER);
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+ }
+
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -906,10 +1124,11 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
*/
void i915_capture_error_state(struct drm_device *dev)
{
+ static bool warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
- int pipe;
+ uint32_t ecode;
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
@@ -926,53 +1145,22 @@ void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
dev->primary->index);
- DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
- DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-
kref_init(&error->ref);
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
- if (HAS_HW_CONTEXTS(dev))
- error->ccid = I915_READ(CCID);
-
- if (HAS_PCH_SPLIT(dev))
- error->ier = I915_READ(DEIER) | I915_READ(GTIER);
- else if (IS_VALLEYVIEW(dev))
- error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
- else if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
- else
- error->ier = I915_READ(IER);
-
- if (INTEL_INFO(dev)->gen >= 6)
- error->derrmr = I915_READ(DERRMR);
-
- if (IS_VALLEYVIEW(dev))
- error->forcewake = I915_READ(FORCEWAKE_VLV);
- else if (INTEL_INFO(dev)->gen >= 7)
- error->forcewake = I915_READ(FORCEWAKE_MT);
- else if (INTEL_INFO(dev)->gen == 6)
- error->forcewake = I915_READ(FORCEWAKE);
-
- if (!HAS_PCH_SPLIT(dev))
- for_each_pipe(pipe)
- error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- error->err_int = I915_READ(GEN7_ERR_INT);
-
- i915_get_extra_instdone(dev, error->extra_instdone);
+ i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
+ ecode = i915_error_generate_code(dev_priv, error);
+
+ if (!warned) {
+ DRM_INFO("GPU HANG [%x]\n", ecode);
+ DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+ warned = true;
+ }
do_gettimeofday(&error->time);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9fec71175571..e9c94c91c6a5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -232,6 +232,18 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
return true;
}
+static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0x7fff0000;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+}
+
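The helper just added relies on PIPESTAT's split layout: enable bits live in 30:16 (the 0x7fff0000 mask keeps them), while the status bits in 15:0 plus the FIFO underrun bit 31 are write-1-to-clear. The same idiom in general form (the helper name is hypothetical):

	/* Sketch: clear W1C status bits in PIPESTAT without disturbing the
	 * enable bits kept in the upper half of the register. */
	static void example_clear_pipestat(struct drm_i915_private *dev_priv,
					   enum pipe pipe, u32 status_bits)
	{
		u32 reg = PIPESTAT(pipe);
		u32 enables = I915_READ(reg) & 0x7fff0000;

		assert_spin_locked(&dev_priv->irq_lock);

		I915_WRITE(reg, enables | status_bits);	/* W1C the status */
		POSTING_READ(reg);
	}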
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
@@ -393,7 +405,9 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
intel_crtc->cpu_fifo_underrun_disabled = !enable;
- if (IS_GEN5(dev) || IS_GEN6(dev))
+ if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
+ i9xx_clear_fifo_underrun(dev, pipe);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -915,6 +929,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
+static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+}
+
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -966,6 +985,43 @@ static void notify_ring(struct drm_device *dev,
i915_queue_hangcheck(dev);
}
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
+ u32 pm_iir, int new_delay)
+{
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (new_delay >= dev_priv->rps.max_delay) {
+ /* Mask UP THRESHOLD Interrupts */
+ I915_WRITE(GEN6_PMINTRMSK,
+ I915_READ(GEN6_PMINTRMSK) |
+ GEN6_PM_RP_UP_THRESHOLD);
+ dev_priv->rps.rp_up_masked = true;
+ }
+ if (dev_priv->rps.rp_down_masked) {
+ /* UnMask DOWN THRESHOLD Interrupts */
+ I915_WRITE(GEN6_PMINTRMSK,
+ I915_READ(GEN6_PMINTRMSK) &
+ ~GEN6_PM_RP_DOWN_THRESHOLD);
+ dev_priv->rps.rp_down_masked = false;
+ }
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (new_delay <= dev_priv->rps.min_delay) {
+ /* Mask DOWN THRESHOLD Interrupts */
+ I915_WRITE(GEN6_PMINTRMSK,
+ I915_READ(GEN6_PMINTRMSK) |
+ GEN6_PM_RP_DOWN_THRESHOLD);
+ dev_priv->rps.rp_down_masked = true;
+ }
+
+ if (dev_priv->rps.rp_up_masked) {
+ /* UnMask UP THRESHOLD Interrupts */
+ I915_WRITE(GEN6_PMINTRMSK,
+ I915_READ(GEN6_PMINTRMSK) &
+ ~GEN6_PM_RP_UP_THRESHOLD);
+ dev_priv->rps.rp_up_masked = false;
+ }
+ }
+}
+
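gen6_set_pm_mask() implements simple hysteresis around the RPS limits: once the clamp in gen6_pm_rps_work() pins new_delay at an endpoint, the threshold interrupt that can no longer be acted on is masked, and the opposite one is unmasked again. The steady-state effect, as a sketch (hypothetical helper; all other PMINTRMSK bits ignored):

	/* Predicted PMINTRMSK threshold bits once the frequency settles;
	 * illustrative only. */
	static u32 example_rps_threshold_mask(int new_delay,
					      int min_delay, int max_delay)
	{
		u32 mask = 0;

		if (new_delay >= max_delay)	/* can't upclock further */
			mask |= GEN6_PM_RP_UP_THRESHOLD;
		if (new_delay <= min_delay)	/* can't downclock further */
			mask |= GEN6_PM_RP_DOWN_THRESHOLD;

		return mask;
	}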
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
@@ -1023,6 +1079,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
*/
new_delay = clamp_t(int, new_delay,
dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+
+ gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
if (IS_VALLEYVIEW(dev_priv->dev))
@@ -1236,6 +1294,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
if (!hotplug_trigger)
return;
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_trigger);
+
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -1415,17 +1476,52 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
}
}
+static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pipe_stats[I915_MAX_PIPES];
+ int pipe;
+
+ spin_lock(&dev_priv->irq_lock);
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff)
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ spin_unlock(&dev_priv->irq_lock);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+}
+
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
- unsigned long irqflags;
- int pipe;
- u32 pipe_stats[I915_MAX_PIPES];
-
- atomic_inc(&dev_priv->irq_received);
while (true) {
iir = I915_READ(VLV_IIR);
@@ -1439,44 +1535,13 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- for_each_pipe(pipe) {
- int reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
- I915_WRITE(reg, pipe_stats[pipe]);
- }
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- for_each_pipe(pipe) {
- if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(dev, pipe);
-
- if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
- }
+ valleyview_pipestat_irq_handler(dev, iir);
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
-
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
@@ -1486,8 +1551,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_READ(PORT_HOTPLUG_STAT);
}
- if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -1546,12 +1609,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
- DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+ DRM_ERROR("PCH transcoder A FIFO underrun\n");
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
- DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+ DRM_ERROR("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
@@ -1567,8 +1630,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
- DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
@@ -1593,17 +1656,17 @@ static void cpt_serr_int_handler(struct drm_device *dev)
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
- DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+ DRM_ERROR("PCH transcoder A FIFO underrun\n");
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
- DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+ DRM_ERROR("PCH transcoder B FIFO underrun\n");
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
false))
- DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
+ DRM_ERROR("PCH transcoder C FIFO underrun\n");
I915_WRITE(SERR_INT, serr_int);
}
@@ -1665,8 +1728,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -1738,8 +1801,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- atomic_inc(&dev_priv->irq_received);
-
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
@@ -1808,8 +1869,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
uint32_t tmp = 0;
enum pipe pipe;
- atomic_inc(&dev_priv->irq_received);
-
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
@@ -1871,8 +1930,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
- DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
@@ -2244,18 +2303,11 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- u32 imr;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- imr = I915_READ(VLV_IMR);
- if (pipe == PIPE_A)
- imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- else
- imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- I915_WRITE(VLV_IMR, imr);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2313,17 +2365,10 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- u32 imr;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
- imr = I915_READ(VLV_IMR);
- if (pipe == PIPE_A)
- imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- else
- imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- I915_WRITE(VLV_IMR, imr);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2479,9 +2524,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
#define BUSY 1
#define KICK 5
#define HUNG 20
-#define FIRE 30
- if (!i915_enable_hangcheck)
+ if (!i915.enable_hangcheck)
return;
for_each_ring(ring, dev_priv, i) {
@@ -2563,7 +2607,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
for_each_ring(ring, dev_priv, i) {
- if (ring->hangcheck.score > FIRE) {
+ if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
@@ -2583,7 +2627,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!i915_enable_hangcheck)
+ if (!i915.enable_hangcheck)
return;
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
@@ -2632,8 +2676,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(HWSTAM, 0xeffe);
I915_WRITE(DEIMR, 0xffffffff);
@@ -2650,8 +2692,6 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@ -2681,8 +2721,6 @@ static void gen8_irq_preinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -3007,8 +3045,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(GEN8_MASTER_IRQ, 0);
#define GEN8_IRQ_FINI_NDX(type, which) do { \
@@ -3049,7 +3085,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3072,7 +3108,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3101,8 +3137,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE16(IMR, 0xffff);
@@ -3187,8 +3221,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- atomic_inc(&dev_priv->irq_received);
-
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
@@ -3210,12 +3242,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
/*
* Clear the PIPE*STAT regs before the IIR
*/
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
+ if (pipe_stats[pipe] & 0x8000ffff)
I915_WRITE(reg, pipe_stats[pipe]);
- }
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3238,6 +3266,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
iir = new_iir;
@@ -3266,8 +3298,6 @@ static void i915_irq_preinstall(struct drm_device * dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3373,8 +3403,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
int pipe, ret = IRQ_NONE;
- atomic_inc(&dev_priv->irq_received);
-
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
@@ -3395,9 +3423,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
/* Clear the PIPE*STAT regs before the IIR */
if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = true;
}
@@ -3413,9 +3438,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
-
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
@@ -3442,6 +3464,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -3476,7 +3502,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -3500,8 +3526,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3610,21 +3634,17 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
- int irq_received;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- atomic_inc(&dev_priv->irq_received);
-
iir = I915_READ(IIR);
for (;;) {
+ bool irq_received = (iir & ~flip_mask) != 0;
bool blc_event = false;
- irq_received = (iir & ~flip_mask) != 0;
-
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
* It doesn't set the bit in iir again, but it still produces
@@ -3642,11 +3662,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
+ irq_received = true;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3663,9 +3680,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
HOTPLUG_INT_STATUS_G4X :
HOTPLUG_INT_STATUS_I915);
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
-
intel_hpd_irq_handler(dev, hotplug_trigger,
IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
@@ -3695,8 +3709,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- }
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
@@ -3735,7 +3752,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ intel_hpd_irq_uninstall(dev_priv);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3752,7 +3769,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void i915_reenable_hotplug_timer_func(unsigned long data)
+static void intel_hpd_irq_reenable(unsigned long data)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
struct drm_device *dev = dev_priv->dev;
@@ -3799,7 +3816,7 @@ void intel_irq_init(struct drm_device *dev)
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev);
- setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+ setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
(unsigned long) dev_priv);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
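
For reference, the intel_hpd_irq_uninstall() helper that the *_irq_uninstall paths now call is presumably a thin wrapper over the old timer teardown, so future HPD cleanup only has to be added in one place. A sketch under that assumption:

	static void intel_hpd_irq_uninstall(drm_i915_private_t *dev_priv)
	{
		del_timer_sync(&dev_priv->hotplug_reenable_timer);
	}

Together with the intel_hpd_irq_reenable() rename above, the timer pair now reads as install/uninstall rather than an ad-hoc callback.
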
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
new file mode 100644
index 000000000000..c743057b6511
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+
+struct i915_params i915 __read_mostly = {
+ .modeset = -1,
+ .panel_ignore_lid = 1,
+ .powersave = 1,
+ .semaphores = -1,
+ .lvds_downclock = 0,
+ .lvds_channel_mode = 0,
+ .panel_use_ssc = -1,
+ .vbt_sdvo_panel_type = -1,
+ .enable_rc6 = -1,
+ .enable_fbc = -1,
+ .enable_hangcheck = true,
+ .enable_ppgtt = -1,
+ .enable_psr = 0,
+ .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .disable_power_well = 1,
+ .enable_ips = 1,
+ .fastboot = 0,
+ .enable_pc8 = 1,
+ .pc8_timeout = 5000,
+ .prefault_disable = 0,
+ .reset = true,
+ .invert_brightness = 0,
+};
+
+module_param_named(modeset, i915.modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+ "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "1=on, -1=force vga console preference [default])");
+
+module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+ "-1=force lid closed, -2=force lid open)");
+
+module_param_named(powersave, i915.powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+ "Enable powersavings, fbc, downclocking, etc. (default: true)");
+
+module_param_named(semaphores, i915.semaphores, int, 0400);
+MODULE_PARM_DESC(semaphores,
+ "Use semaphores for inter-ring sync "
+ "(default: -1 (use per-chip defaults))");
+
+module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
+MODULE_PARM_DESC(enable_rc6,
+ "Enable power-saving render C-state 6. "
+ "Different stages can be selected via bitmask values "
+ "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+ "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+ "default: -1 (use per-chip default)");
+
+module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
+MODULE_PARM_DESC(enable_fbc,
+ "Enable frame buffer compression for power savings "
+ "(default: -1 (use per-chip default))");
+
+module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+ "Use panel (LVDS/eDP) downclocking for power savings "
+ "(default: false)");
+
+module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: auto from VBT)");
+
+module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+module_param_named(reset, i915.reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+ "Periodically check GPU activity for detecting hangs. "
+ "WARNING: Disabling this can cause system wide hangs. "
+ "(default: true)");
+
+module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
+MODULE_PARM_DESC(enable_ppgtt,
+ "Override PPGTT usage. "
+ "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
+
+module_param_named(enable_psr, i915.enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+ "Enable preliminary hardware support.");
+
+module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+ "Disable the power well when possible (default: true)");
+
+module_param_named(enable_ips, i915.enable_ips, int, 0600);
+MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+ "Try to skip unnecessary mode sets at boot time (default: false)");
+
+module_param_named(enable_pc8, i915.enable_pc8, int, 0600);
+MODULE_PARM_DESC(enable_pc8,
+ "Enable support for low power package C states (PC8+) (default: true)");
+
+module_param_named(pc8_timeout, i915.pc8_timeout, int, 0600);
+MODULE_PARM_DESC(pc8_timeout,
+ "Number of msecs of idleness required to enter PC8+ (default: 5000)");
+
+module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+ "Disable page prefaulting for pread/pwrite/reloc (default:false). "
+ "For developers only.");
+
+module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
+MODULE_PARM_DESC(invert_brightness,
+ "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a48b7cad6f11..cc3ea049269b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,7 +26,6 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
-#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -73,7 +72,8 @@
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
-#define LBB 0xf4
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+
/* Graphics reset regs */
#define I965_GDRST 0xc0 /* PCI config register */
@@ -934,6 +934,8 @@
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
+#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
+#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
@@ -1046,9 +1048,8 @@
#define FBC_CTL_IDLE_LINE (2<<2)
#define FBC_CTL_IDLE_DEBUG (3<<2)
#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANEA (0<<0)
-#define FBC_CTL_PLANEB (1<<0)
-#define FBC_FENCE_OFF 0x0321b
+#define FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300
#define FBC_LL_SIZE (1536)
@@ -1057,9 +1058,8 @@
#define DPFC_CB_BASE 0x3200
#define DPFC_CONTROL 0x3208
#define DPFC_CTL_EN (1<<31)
-#define DPFC_CTL_PLANEA (0<<30)
-#define DPFC_CTL_PLANEB (1<<30)
-#define IVB_DPFC_CTL_PLANE_SHIFT (29)
+#define DPFC_CTL_PLANE(plane) ((plane)<<30)
+#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
#define DPFC_CTL_FENCE_EN (1<<29)
#define IVB_DPFC_CTL_FENCE_EN (1<<28)
#define DPFC_CTL_PERSISTENT_MODE (1<<25)
@@ -1202,6 +1202,10 @@
/*
* Clock control & power management
*/
+#define DPLL_A_OFFSET 0x6014
+#define DPLL_B_OFFSET 0x6018
+#define DPLL(pipe) (dev_priv->info->dpll_offsets[pipe] + \
+ dev_priv->info->display_mmio_offset)
#define VGA0 0x6000
#define VGA1 0x6004
@@ -1214,9 +1218,6 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
-#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_SDVO_HIGH_SPEED (1 << 30)
#define DPLL_DVO_2X_MODE (1 << 30)
@@ -1278,7 +1279,12 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
+
+#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
+#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
+#define DPLL_MD(pipe) (dev_priv->info->dpll_md_offsets[pipe] + \
+ dev_priv->info->display_mmio_offset)
+
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -1315,8 +1321,6 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
-#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
#define _FPA1 0x06044
@@ -1472,10 +1476,10 @@
/*
* Palette regs
*/
-
-#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
-#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
-#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
+#define PALETTE_A_OFFSET 0xa000
+#define PALETTE_B_OFFSET 0xa800
+#define PALETTE(pipe) (dev_priv->info->palette_offsets[pipe] + \
+ dev_priv->info->display_mmio_offset)
/* MCH MMIO space */
@@ -1862,7 +1866,7 @@
*/
/* Pipe A CRC regs */
-#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
+#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
@@ -1902,11 +1906,11 @@
#define _PIPE_CRC_RES_4_A_IVB 0x60070
#define _PIPE_CRC_RES_5_A_IVB 0x60074
-#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
-#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
-#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
-#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
-#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
+#define _PIPE_CRC_RES_RED_A 0x60060
+#define _PIPE_CRC_RES_GREEN_A 0x60064
+#define _PIPE_CRC_RES_BLUE_A 0x60068
+#define _PIPE_CRC_RES_RES1_A_I915 0x6006c
+#define _PIPE_CRC_RES_RES2_A_G4X 0x60080
/* Pipe B CRC regs */
#define _PIPE_CRC_RES_1_B_IVB 0x61064
@@ -1915,59 +1919,69 @@
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
-#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
#define PIPE_CRC_RES_1_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
#define PIPE_CRC_RES_2_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
#define PIPE_CRC_RES_3_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
#define PIPE_CRC_RES_4_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
#define PIPE_CRC_RES_5_IVB(pipe) \
- _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
#define PIPE_CRC_RES_RED(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
#define PIPE_CRC_RES_GREEN(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
#define PIPE_CRC_RES_BLUE(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
#define PIPE_CRC_RES_RES1_I915(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) \
- _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
/* Pipe A timing regs */
-#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
-#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
-#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
-#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
-#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
-#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
-#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
-#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
-#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
+#define _HTOTAL_A 0x60000
+#define _HBLANK_A 0x60004
+#define _HSYNC_A 0x60008
+#define _VTOTAL_A 0x6000c
+#define _VBLANK_A 0x60010
+#define _VSYNC_A 0x60014
+#define _PIPEASRC 0x6001c
+#define _BCLRPAT_A 0x60020
+#define _VSYNCSHIFT_A 0x60028
/* Pipe B timing regs */
-#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
-#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
-#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
-#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
-#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
-#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
-#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
-#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
-#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
-
-#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
-#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
-#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
-#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define _HTOTAL_B 0x61000
+#define _HBLANK_B 0x61004
+#define _HSYNC_B 0x61008
+#define _VTOTAL_B 0x6100c
+#define _VBLANK_B 0x61010
+#define _VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+#define _VSYNCSHIFT_B 0x61028
+
+#define TRANSCODER_A_OFFSET 0x60000
+#define TRANSCODER_B_OFFSET 0x61000
+#define TRANSCODER_C_OFFSET 0x62000
+#define TRANSCODER_EDP_OFFSET 0x6f000
+
+#define _TRANSCODER2(pipe, reg) (dev_priv->info->trans_offsets[(pipe)] - \
+ dev_priv->info->trans_offsets[TRANSCODER_A] + (reg) + \
+ dev_priv->info->display_mmio_offset)
+
+#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
+#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
+#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
+#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
+#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
+#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
+#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
+#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
+#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
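
_TRANSCODER2() rebases a transcoder-A register definition onto any transcoder by indexing a per-device offset table instead of assuming a fixed A/B stride, which is what lets HTOTAL() and friends finally reach transcoder EDP. A worked sketch (the table contents below mirror the *_OFFSET defines; treating them as the actual info-struct contents is an assumption):

	static const u32 trans_offsets[] = {
		[TRANSCODER_A]   = TRANSCODER_A_OFFSET,   /* 0x60000 */
		[TRANSCODER_B]   = TRANSCODER_B_OFFSET,   /* 0x61000 */
		[TRANSCODER_C]   = TRANSCODER_C_OFFSET,   /* 0x62000 */
		[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, /* 0x6f000 */
	};

	/*
	 * HTOTAL(TRANSCODER_EDP)
	 *   = trans_offsets[EDP] - trans_offsets[A] + _HTOTAL_A + mmio_offset
	 *   = 0x6f000 - 0x60000 + 0x60000 + 0 = 0x6f000
	 * which the two-register _TRANSCODER(tran, a, b) form could never
	 * express for a third (or fourth) transcoder.
	 */
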
@@ -3178,10 +3192,10 @@
/* Display & cursor control */
/* Pipe A */
-#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
+#define _PIPEADSL 0x70000
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
-#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
+#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -3224,9 +3238,9 @@
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
+#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
@@ -3244,12 +3258,12 @@
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -3262,12 +3276,26 @@
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
-#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
-#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
-#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
-#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define PIPE_A_OFFSET 0x70000
+#define PIPE_B_OFFSET 0x71000
+#define PIPE_C_OFFSET 0x72000
+/*
+ * There's actually no pipe EDP. Some pipe registers have
+ * simply shifted from the pipe to the transcoder, while
+ * keeping their original offset. Thus we need PIPE_EDP_OFFSET
+ * to access such registers in transcoder EDP.
+ */
+#define PIPE_EDP_OFFSET 0x7f000
+
+#define _PIPE2(pipe, reg) (dev_priv->info->pipe_offsets[pipe] - \
+ dev_priv->info->pipe_offsets[PIPE_A] + (reg) + \
+ dev_priv->info->display_mmio_offset)
+
+#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
+#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
@@ -3279,20 +3307,20 @@
#define PIPEMISC_DITHER_ENABLE (1<<4)
#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
#define PIPEMISC_DITHER_TYPE_SP (0<<2)
-#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
-#define SPRITED_FLIPDONE_INT_EN (1<<26)
-#define SPRITEC_FLIPDONE_INT_EN (1<<25)
-#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define SPRITED_FLIP_DONE_INT_EN (1<<26)
+#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
+#define PLANEB_FLIP_DONE_INT_EN (1<<24)
#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
-#define SPRITEB_FLIPDONE_INT_EN (1<<18)
-#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
+#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
@@ -3520,7 +3548,7 @@
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
/* Display A control */
-#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
+#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -3554,25 +3582,25 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
-#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
-#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
-#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
-#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
-#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
-#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
-#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
-
-#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
-#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
-#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
-#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
-#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
-#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
-#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define _DSPAADDR 0x70184
+#define _DSPASTRIDE 0x70188
+#define _DSPAPOS 0x7018C /* reserved */
+#define _DSPASIZE 0x70190
+#define _DSPASURF 0x7019C /* 965+ only */
+#define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
+
+#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
+#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
+#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
+#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
+#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
+#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
-#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
+#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3866,48 +3894,45 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
+#define _PIPEA_DATA_M1 0x60030
#define PIPE_DATA_M1_OFFSET 0
-#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
+#define _PIPEA_DATA_N1 0x60034
#define PIPE_DATA_N1_OFFSET 0
-#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
+#define _PIPEA_DATA_M2 0x60038
#define PIPE_DATA_M2_OFFSET 0
-#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
+#define _PIPEA_DATA_N2 0x6003c
#define PIPE_DATA_N2_OFFSET 0
-#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
+#define _PIPEA_LINK_M1 0x60040
#define PIPE_LINK_M1_OFFSET 0
-#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
+#define _PIPEA_LINK_N1 0x60044
#define PIPE_LINK_N1_OFFSET 0
-#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
+#define _PIPEA_LINK_M2 0x60048
#define PIPE_LINK_M2_OFFSET 0
-#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
+#define _PIPEA_LINK_N2 0x6004c
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are the same, starting from 0x61000 */
-#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
-#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
-
-#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
-#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
-
-#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
-#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
-
-#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
-#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
-
-#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define _PIPEB_DATA_M1 0x61030
+#define _PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M2 0x61038
+#define _PIPEB_DATA_N2 0x6103c
+#define _PIPEB_LINK_M1 0x61040
+#define _PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M2 0x61048
+#define _PIPEB_LINK_N2 0x6104c
+
+#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -4120,6 +4145,8 @@
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+#define HSW_NDE_RSTWRN_OPT 0x46408
+#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
@@ -4127,6 +4154,9 @@
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+#define GEN7_L3SQCREG1 0xB010
+#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
+
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
#define GEN7_L3AGDIS (1<<19)
@@ -4436,24 +4466,24 @@
#define HSW_VIDEO_DIP_GCP_B 0x61210
#define HSW_TVIDEO_DIP_CTL(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
#define HSW_TVIDEO_DIP_VS_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
#define HSW_TVIDEO_DIP_GCP(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
- _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
#define HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1<<31)
#define HSW_STEREO_3D_CTL_B 0x71020
#define HSW_STEREO_3D_CTL(trans) \
- _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+ _PIPE2(trans, HSW_STEREO_3D_CTL_A)
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
@@ -4945,6 +4975,10 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
+#define VLV_GFX_CLK_STATUS_BIT (1<<3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
+
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
@@ -5178,8 +5212,8 @@
#define TRANS_DDI_FUNC_CTL_B 0x61400
#define TRANS_DDI_FUNC_CTL_C 0x62400
#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
-#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
- TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
+
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
@@ -5311,8 +5345,12 @@
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SSC (1<<28)
#define SPLL_PLL_NON_SSC (2<<28)
+#define SPLL_PLL_LCPLL (3<<28)
+#define SPLL_PLL_REF_MASK (3<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
+#define SPLL_PLL_FREQ_2700MHz (2<<26)
+#define SPLL_PLL_FREQ_MASK (3<<26)
/* WRPLL */
#define WRPLL_CTL1 0x46040
@@ -5323,8 +5361,13 @@
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_REF_MASK (0xff)
#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
+#define WRPLL_DIVIDER_POST_SHIFT 8
#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_FB_SHIFT 16
+#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
@@ -5337,6 +5380,7 @@
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
+#define PORT_CLK_SEL_MASK (7<<29)
/* Transcoder clock selection */
#define TRANS_CLK_SEL_A 0x46140
@@ -5346,10 +5390,12 @@
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
-#define _TRANSA_MSA_MISC 0x60410
-#define _TRANSB_MSA_MISC 0x61410
-#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
- _TRANSB_MSA_MISC)
+#define TRANSA_MSA_MISC 0x60410
+#define TRANSB_MSA_MISC 0x61410
+#define TRANSC_MSA_MISC 0x62410
+#define TRANS_EDP_MSA_MISC 0x6f410
+#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
+
#define TRANS_MSA_SYNC_CLK (1<<0)
#define TRANS_MSA_6_BPC (0<<5)
#define TRANS_MSA_8_BPC (1<<5)
@@ -5857,4 +5903,12 @@
#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
+/* For UMS only (deprecated): */
+#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
+#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
+#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020)
+
#endif /* _I915_REG_H_ */
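
All of the reworked macros assume struct intel_device_info now carries the per-pipe offset tables (pipe_offsets, trans_offsets, palette_offsets, dpll_offsets, dpll_md_offsets). The i915_drv.c side is outside this section, but a plausible platform definition would populate them from the new *_OFFSET constants, roughly:

	/* Sketch; the helper name and field grouping are assumptions. */
	#define GEN_DEFAULT_PIPEOFFSETS \
		.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
				  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
		.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
				   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
		.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }, \
		.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
		.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }

The UMS-only _PALETTE_A/_DPLL_A style defines kept at the bottom of the header exist precisely because the UMS code cannot rely on those tables.
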
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8150fdc08d49..56785e8fb2eb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -236,19 +236,9 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
- /* Only regfile.save FBC state on the platform that supports FBC */
- if (HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
- } else if (IS_GM45(dev)) {
- dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
- }
- }
+ /* save FBC interval */
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_vga(dev);
@@ -300,18 +290,10 @@ static void i915_restore_display(struct drm_device *dev)
/* only restore FBC info on the platform that supports FBC */
intel_disable_fbc(dev);
- if (HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
- } else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
- } else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- }
- }
+
+ /* restore FBC interval */
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_vga(dev);
@@ -324,10 +306,6 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (INTEL_INFO(dev)->gen <= 4)
- pci_read_config_byte(dev->pdev, LBB,
- &dev_priv->regfile.saveLBB);
-
mutex_lock(&dev->struct_mutex);
i915_save_display(dev);
@@ -377,10 +355,6 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (INTEL_INFO(dev)->gen <= 4)
- pci_write_config_byte(dev->pdev, LBB,
- dev_priv->regfile.saveLBB);
-
mutex_lock(&dev->struct_mutex);
i915_gem_restore_fences(dev);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 33bcae314bf8..0c741f4eefb0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -357,6 +357,11 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
else
gen6_set_rps(dev, val);
}
+ else if (!IS_VALLEYVIEW(dev))
+ /* We still need gen6_set_rps to process the new max_delay
+ * and update the interrupt limits even though the frequency
+ * request is unchanged. */
+ gen6_set_rps(dev, dev_priv->rps.cur_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -426,6 +431,11 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
else
gen6_set_rps(dev, val);
}
+ else if (!IS_VALLEYVIEW(dev))
+ /* We still need gen6_set_rps to process the new min_delay
+ * and update the interrupt limits even though the frequency
+ * request is unchanged. */
+ gen6_set_rps(dev, dev_priv->rps.cur_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
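
Both sysfs hunks make the same point: the store handler must reach gen6_set_rps() even when the frequency request itself is unchanged, because that function is also where the RPS up/down interrupt thresholds are derived from the (possibly new) min/max limits. Schematically (types and helpers below are illustrative stubs, not the driver's):

	struct rps_sketch { unsigned cur_delay, min_delay, max_delay; };

	/* Stubs standing in for the MMIO writes the real code performs. */
	static void write_frequency_request(struct rps_sketch *rps, unsigned val) {}
	static void write_interrupt_limits(struct rps_sketch *rps,
					   unsigned min, unsigned max) {}

	static void set_rps(struct rps_sketch *rps, unsigned val)
	{
		if (val != rps->cur_delay)
			write_frequency_request(rps, val);

		/* Limits derive from min/max, so refresh them unconditionally. */
		write_interrupt_limits(rps, rps->min_delay, rps->max_delay);
		rps->cur_delay = val;
	}
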
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index caa18e855815..480da593e6c0 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -271,6 +271,10 @@ void i915_save_display_reg(struct drm_device *dev)
/* FIXME: save TV & SDVO state */
/* Backlight */
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_read_config_byte(dev->pdev, PCI_LBPC,
+ &dev_priv->regfile.saveLBB);
+
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -293,6 +297,10 @@ void i915_restore_display_reg(struct drm_device *dev)
int i;
/* Backlight */
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_write_config_byte(dev->pdev, PCI_LBPC,
+ dev_priv->regfile.saveLBB);
+
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
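
With the LBB byte handled here, the legacy backlight state lives alongside the rest of the UMS-only register file instead of in the generic suspend path (the matching removal is in the i915_suspend.c hunks above). The accessors are the standard PCI config helpers; a self-contained sketch:

	#include <linux/pci.h>

	#define PCI_LBPC 0xf4 /* legacy backlight brightness, also called LBB */

	static u8 save_lbpc(struct pci_dev *pdev)
	{
		u8 lbb;

		pci_read_config_byte(pdev, PCI_LBPC, &lbb);
		return lbb;
	}

	static void restore_lbpc(struct pci_dev *pdev, u8 lbb)
	{
		pci_write_config_byte(pdev, PCI_LBPC, lbb);
	}
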
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f22041973f3a..86b95ca413d1 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -259,7 +259,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
downclock = dvo_timing->clock;
}
- if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+ if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = downclock * 10;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
@@ -318,7 +318,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915_vbt_sdvo_panel_type;
+ index = i915.vbt_sdvo_panel_type;
if (index == -2) {
DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e2e39e65f109..5b444a4b625c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -857,4 +857,6 @@ void intel_crt_init(struct drm_device *dev)
dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
}
+
+ intel_crt_reset(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e06b9e017d6b..cd65dd04ba20 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -633,6 +633,97 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
/* Otherwise a < c && b >= d, do nothing */
}
+static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ int reg)
+{
+ int refclk = LC_FREQ;
+ int n, p, r;
+ u32 wrpll;
+
+ wrpll = I915_READ(reg);
+ switch (wrpll & SPLL_PLL_REF_MASK) {
+ case SPLL_PLL_SSC:
+ case SPLL_PLL_NON_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = 135;
+ break;
+ case SPLL_PLL_LCPLL:
+ refclk = LC_FREQ;
+ break;
+ default:
+ WARN(1, "bad wrpll refclk\n");
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to kHz; p & r have a fixed-point portion */
+ return (refclk * n * 100) / (p * r);
+}
+
+static void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ int link_clock = 0;
+ u32 val, pll;
+
+ val = I915_READ(PORT_CLK_SEL(port));
+ switch (val & PORT_CLK_SEL_MASK) {
+ case PORT_CLK_SEL_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+ break;
+ case PORT_CLK_SEL_SPLL:
+ pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
+ if (pll == SPLL_PLL_FREQ_810MHz)
+ link_clock = 81000;
+ else if (pll == SPLL_PLL_FREQ_1350MHz)
+ link_clock = 135000;
+ else if (pll == SPLL_PLL_FREQ_2700MHz)
+ link_clock = 270000;
+ else {
+ WARN(1, "bad spll freq\n");
+ return;
+ }
+ break;
+ default:
+ WARN(1, "bad port clock sel\n");
+ return;
+ }
+
+ pipe_config->port_clock = link_clock * 2;
+
+ if (pipe_config->has_pch_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->fdi_m_n);
+ else if (pipe_config->has_dp_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static void
intel_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
@@ -1200,7 +1291,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_on(intel_dp);
+ intel_edp_panel_on(intel_dp);
}
WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1244,7 +1335,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- ironlake_edp_panel_off(intel_dp);
+ intel_edp_panel_off(intel_dp);
}
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
@@ -1279,7 +1370,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
if (port == PORT_A)
intel_dp_stop_link_train(intel_dp);
- ironlake_edp_backlight_on(intel_dp);
+ intel_edp_backlight_on(intel_dp);
intel_edp_psr_enable(intel_dp);
}
@@ -1312,7 +1403,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_psr_disable(intel_dp);
- ironlake_edp_backlight_off(intel_dp);
+ intel_edp_backlight_off(intel_dp);
}
}
@@ -1509,6 +1600,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
+
+ intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
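
In intel_ddi_calc_wrpll_link(), p and r each carry one fixed-point fraction bit, which is why the "* 100" factor turns a MHz reference straight into a kHz link rate. A runnable arithmetic check with hypothetical divider values:

	#include <stdio.h>

	/* Same arithmetic as intel_ddi_calc_wrpll_link(), lifted out so the
	 * fixed-point scaling is visible; the values in main() are made up. */
	static int wrpll_link_khz(int refclk_mhz, int n, int p, int r)
	{
		return (refclk_mhz * n * 100) / (p * r);
	}

	int main(void)
	{
		/* 135 MHz SSC reference; n=40, p=4, r=2 as register-encoded. */
		int link = wrpll_link_khz(135, 40, 4, 2); /* 67500 kHz */

		/* intel_ddi_clock_get() stores port_clock = link_clock * 2. */
		printf("link %d kHz, port clock %d kHz\n", link, link * 2);
		return 0;
	}
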
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4c1672809493..0f4cbd0aa59e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2372,7 +2372,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/
- if (i915_fastboot) {
+ if (i915.fastboot) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
@@ -4088,9 +4088,8 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
/* Looks like the 200MHz CDclk freq doesn't work on some configs */
}
-static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
- unsigned modeset_pipes,
- struct intel_crtc_config *pipe_config)
+/* compute the max pixel clock for the new configuration */
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc;
@@ -4098,31 +4097,26 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
- if (modeset_pipes & (1 << intel_crtc->pipe))
- max_pixclk = max(max_pixclk,
- pipe_config->adjusted_mode.crtc_clock);
- else if (intel_crtc->base.enabled)
+ if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
- intel_crtc->config.adjusted_mode.crtc_clock);
+ intel_crtc->new_config->adjusted_mode.crtc_clock);
}
return max_pixclk;
}
static void valleyview_modeset_global_pipes(struct drm_device *dev,
- unsigned *prepare_pipes,
- unsigned modeset_pipes,
- struct intel_crtc_config *pipe_config)
+ unsigned *prepare_pipes)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
- pipe_config);
+ int max_pixclk = intel_mode_max_pixclk(dev_priv);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
return;
+ /* disable/enable all currently active pipes while we change cdclk */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head)
if (intel_crtc->base.enabled)
@@ -4132,7 +4126,7 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+ int max_pixclk = intel_mode_max_pixclk(dev_priv);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -4176,6 +4170,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -4214,6 +4209,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe, false, false);
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
@@ -4272,6 +4268,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_disable_planes(crtc);
intel_disable_primary_plane(dev_priv, plane, pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_disable_pipe(dev_priv, pipe);
i9xx_pfit_disable(intel_crtc);
@@ -4583,7 +4580,7 @@ retry:
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
- pipe_config->ips_enabled = i915_enable_ips &&
+ pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp <= 24;
}
@@ -4784,8 +4781,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- if (i915_panel_use_ssc >= 0)
- return i915_panel_use_ssc != 0;
+ if (i915.panel_use_ssc >= 0)
+ return i915.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@@ -4844,7 +4841,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
- reduced_clock && i915_powersave) {
+ reduced_clock && i915.powersave) {
I915_WRITE(FP1(pipe), fp2);
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
@@ -6348,7 +6345,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
- if (is_lvds && has_reduced_clock && i915_powersave)
+ if (is_lvds && has_reduced_clock && i915.powersave)
intel_crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
@@ -6716,7 +6713,7 @@ static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
return;
schedule_delayed_work(&dev_priv->pc8.enable_work,
- msecs_to_jiffies(i915_pc8_timeout));
+ msecs_to_jiffies(i915.pc8_timeout));
}
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
@@ -6815,7 +6812,7 @@ static void hsw_update_package_c8(struct drm_device *dev)
if (!HAS_PC8(dev_priv->dev))
return;
- if (!i915_enable_pc8)
+ if (!i915.enable_pc8)
return;
mutex_lock(&dev_priv->pc8.lock);
@@ -7855,6 +7852,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
to_intel_connector(connector)->new_encoder = intel_encoder;
intel_crtc = to_intel_crtc(crtc);
+ intel_crtc->new_enabled = true;
+ intel_crtc->new_config = &intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -7878,21 +7877,28 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- mutex_unlock(&crtc->mutex);
- return false;
+ goto fail;
}
if (intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- mutex_unlock(&crtc->mutex);
- return false;
+ goto fail;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
+
+ fail:
+ intel_crtc->new_enabled = crtc->enabled;
+ if (intel_crtc->new_enabled)
+ intel_crtc->new_config = &intel_crtc->config;
+ else
+ intel_crtc->new_config = NULL;
+ mutex_unlock(&crtc->mutex);
+ return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -7902,6 +7908,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
@@ -7910,6 +7917,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (old->load_detect_temp) {
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
+ intel_crtc->new_enabled = false;
+ intel_crtc->new_config = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
if (old->release_fb) {
@@ -8201,7 +8210,7 @@ void intel_mark_idle(struct drm_device *dev)
hsw_package_c8_gpu_idle(dev_priv);
- if (!i915_powersave)
+ if (!i915.powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -8221,7 +8230,7 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
- if (!i915_powersave)
+ if (!i915.powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -8766,6 +8775,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
*/
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
+ struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -8780,6 +8790,16 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
}
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ crtc->new_enabled = crtc->base.enabled;
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
}
/**
@@ -8789,6 +8809,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
*/
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
+ struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -8801,6 +8822,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
base.head) {
encoder->base.crtc = &encoder->new_crtc->base;
}
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ crtc->base.enabled = crtc->new_enabled;
+ }
}
static void
@@ -9127,29 +9153,22 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
*prepare_pipes |= 1 << encoder->new_crtc->pipe;
}
- /* Check for any pipes that will be fully disabled ... */
+ /* Check for pipes that will be enabled/disabled ... */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
- bool used = false;
-
- /* Don't try to disable disabled crtcs. */
- if (!intel_crtc->base.enabled)
+ if (intel_crtc->base.enabled == intel_crtc->new_enabled)
continue;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
- if (encoder->new_crtc == intel_crtc)
- used = true;
- }
-
- if (!used)
+ if (!intel_crtc->new_enabled)
*disable_pipes |= 1 << intel_crtc->pipe;
+ else
+ *prepare_pipes |= 1 << intel_crtc->pipe;
}
/* set_mode is also used to update properties on live display pipes. */
intel_crtc = to_intel_crtc(crtc);
- if (crtc->enabled)
+ if (intel_crtc->new_enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;
/*
@@ -9208,10 +9227,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
intel_modeset_commit_output_state(dev);
- /* Update computed state. */
+ /* Double check state. */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
- intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+ WARN_ON(intel_crtc->new_config &&
+ intel_crtc->new_config != &intel_crtc->config);
+ WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -9380,10 +9402,8 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- if (!HAS_DDI(dev)) {
- PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
- }
+ PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
@@ -9643,6 +9663,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
+ to_intel_crtc(crtc)->new_config = pipe_config;
}
/*
@@ -9653,8 +9674,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* adjusted_mode bits in the crtc directly.
*/
if (IS_VALLEYVIEW(dev)) {
- valleyview_modeset_global_pipes(dev, &prepare_pipes,
- modeset_pipes, pipe_config);
+ valleyview_modeset_global_pipes(dev, &prepare_pipes);
/* may have added more to prepare_pipes than we should */
prepare_pipes &= ~disable_pipes;
@@ -9676,6 +9696,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
+ to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
/*
* Calculate and store various constants which
@@ -9746,16 +9767,24 @@ static void intel_set_config_free(struct intel_set_config *config)
kfree(config->save_connector_encoders);
kfree(config->save_encoder_crtcs);
+ kfree(config->save_crtc_enabled);
kfree(config);
}
static int intel_set_config_save_state(struct drm_device *dev,
struct intel_set_config *config)
{
+ struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int count;
+ config->save_crtc_enabled =
+ kcalloc(dev->mode_config.num_crtc,
+ sizeof(bool), GFP_KERNEL);
+ if (!config->save_crtc_enabled)
+ return -ENOMEM;
+
config->save_encoder_crtcs =
kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
@@ -9773,6 +9802,11 @@ static int intel_set_config_save_state(struct drm_device *dev,
* restored, not the drivers personal bookkeeping.
*/
count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ config->save_crtc_enabled[count++] = crtc->enabled;
+ }
+
+ count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
config->save_encoder_crtcs[count++] = encoder->crtc;
}
@@ -9788,11 +9822,22 @@ static int intel_set_config_save_state(struct drm_device *dev,
static void intel_set_config_restore_state(struct drm_device *dev,
struct intel_set_config *config)
{
+ struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
int count;
count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ crtc->new_enabled = config->save_crtc_enabled[count++];
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
+
+ count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
@@ -9840,7 +9885,7 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
- if (intel_crtc->active && i915_fastboot) {
+ if (intel_crtc->active && i915.fastboot) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
@@ -9876,9 +9921,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
struct intel_set_config *config)
{
- struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
int ro;
/* The upper layers ensure that we either disable a crtc or have a list
@@ -9921,6 +9966,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Update crtc of enabled connectors. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
+ struct drm_crtc *new_crtc;
+
if (!connector->new_encoder)
continue;
@@ -9971,9 +10018,58 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
/* Now we've also updated encoder->new_crtc for all encoders. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ crtc->new_enabled = false;
+
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == crtc) {
+ crtc->new_enabled = true;
+ break;
+ }
+ }
+
+ if (crtc->new_enabled != crtc->base.enabled) {
+ DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+ crtc->new_enabled ? "en" : "dis");
+ config->mode_changed = true;
+ }
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
+
return 0;
}
+static void disable_crtc_nofb(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
+ pipe_name(crtc->pipe));
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ if (connector->new_encoder &&
+ connector->new_encoder->new_crtc == crtc)
+ connector->new_encoder = NULL;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (encoder->new_crtc == crtc)
+ encoder->new_crtc = NULL;
+ }
+
+ crtc->new_enabled = false;
+ crtc->new_config = NULL;
+}
+
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
@@ -10040,7 +10136,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
* flipping, so increasing its cost here shouldn't be a big
* deal).
*/
- if (i915_fastboot && ret == 0)
+ if (i915.fastboot && ret == 0)
intel_modeset_check_state(set->crtc->dev);
}
@@ -10050,6 +10146,15 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
fail:
intel_set_config_restore_state(dev, config);
+ /*
+ * HACK: if the pipe was on, but we didn't have a framebuffer,
+ * force the pipe off to avoid oopsing in the modeset code
+ * due to fb==NULL. This should only happen during boot since
+ * we don't yet reconstruct the FB from the hardware state.
+ */
+ if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
+ disable_crtc_nofb(to_intel_crtc(save_set.crtc));
+
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
@@ -10839,6 +10944,9 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
};
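Quirk entries are matched by PCI device id plus subsystem vendor/device id. A small sketch of that lookup, with a struct mirroring the intel_quirk layout used above and sample table contents:

/* Sketch of quirk matching by device/subsystem IDs; the struct layout
 * mirrors intel_quirk as used above, the table contents are samples. */
#include <stdio.h>

struct quirk {
	unsigned short device, subsystem_vendor, subsystem_device;
	const char *name;
};

static const struct quirk quirks[] = {
	{ 0x2a42, 0x1025, 0x0260, "invert_brightness" }, /* Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x048a, "invert_brightness" }, /* Aspire 5336 */
};

int main(void)
{
	unsigned short dev = 0x2a42, sv = 0x1025, sd = 0x048a;
	unsigned int i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].device == dev &&
		    quirks[i].subsystem_vendor == sv &&
		    quirks[i].subsystem_device == sd)
			printf("applying quirk %s\n", quirks[i].name);
	return 0;
}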
static void intel_init_quirks(struct drm_device *dev)
@@ -10869,6 +10977,7 @@ static void i915_disable_vga(struct drm_device *dev)
u8 sr1;
u32 vga_reg = i915_vgacntrl_reg(dev);
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
@@ -11265,7 +11374,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
- if (crtc->active && i915_fastboot) {
+ if (crtc->active && i915.fastboot) {
intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
@@ -11329,7 +11438,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
mutex_lock(&dev->mode_config.mutex);
- drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
mutex_unlock(&dev->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 57552eb386b0..bd1df502bc34 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -91,18 +91,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
}
static void intel_dp_link_down(struct intel_dp *intel_dp);
+static void edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+ struct drm_device *dev = intel_dp->attached_connector->base.dev;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- max_link_bw = DP_LINK_BW_2_7;
+ if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+ intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
+ max_link_bw = DP_LINK_BW_5_4;
+ else
+ max_link_bw = DP_LINK_BW_2_7;
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -294,7 +301,7 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
-static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -302,7 +309,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
-static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -319,7 +326,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(_pp_stat_reg(intel_dp)),
@@ -351,31 +358,46 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
return status;
}
-static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
- int index)
+static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- /* The clock divider is based off the hrawclk,
- * and would like to run at 2MHz. So, take the
- * hrawclk value and divide by 2 and use that
- *
- * Note that PCH attached eDP panels should use a 125MHz input
- * clock divider.
+ /*
+ * The clock divider is based off the hrawclk, and would like to run at
+ * 2MHz. So, take the hrawclk value and divide by 2 and use that.
*/
- if (IS_VALLEYVIEW(dev)) {
- return index ? 0 : 100;
- } else if (intel_dig_port->port == PORT_A) {
- if (index)
- return 0;
- if (HAS_DDI(dev))
- return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
- else if (IS_GEN6(dev) || IS_GEN7(dev))
+ return index ? 0 : intel_hrawclk(dev) / 2;
+}
+
+static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ if (index)
+ return 0;
+
+ if (intel_dig_port->port == PORT_A) {
+ if (IS_GEN6(dev) || IS_GEN7(dev))
return 200; /* SNB & IVB eDP input clock at 400MHz */
else
return 225; /* eDP input clock at 450MHz */
+ } else {
+ return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ }
+}
+
+static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (intel_dig_port->port == PORT_A) {
+ if (index)
+ return 0;
+ return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
@@ -383,13 +405,46 @@ static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
case 1: return 72;
default: return 0;
}
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else {
return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
- } else {
- return index ? 0 :intel_hrawclk(dev) / 2;
}
}
+static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ return index ? 0 : 100;
+}
+
+static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t precharge, timeout;
+
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
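i9xx_get_aux_send_ctl packs everything the AUX engine needs for one transaction into a single register word. A userspace sketch of the same bitfield composition; the shifts and flag values below are made-up stand-ins, not the real DP_AUX_CH_CTL_* definitions from i915_reg.h:

/* Sketch of composing an AUX_CH_CTL word from independent fields;
 * the flag values and shifts are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define CTL_SEND_BUSY		(1u << 31)
#define CTL_DONE		(1u << 30)
#define CTL_INTERRUPT		(1u << 29)
#define CTL_TIME_OUT_ERROR	(1u << 28)
#define CTL_RECEIVE_ERROR	(1u << 25)
#define CTL_MSG_SIZE_SHIFT	20
#define CTL_PRECHARGE_SHIFT	16
#define CTL_CLOCK_2X_SHIFT	0

static uint32_t get_aux_send_ctl(int has_irq, int send_bytes,
				 uint32_t precharge, uint32_t divider)
{
	return CTL_SEND_BUSY | CTL_DONE |
	       (has_irq ? CTL_INTERRUPT : 0) |
	       CTL_TIME_OUT_ERROR | CTL_RECEIVE_ERROR |
	       ((uint32_t)send_bytes << CTL_MSG_SIZE_SHIFT) |
	       (precharge << CTL_PRECHARGE_SHIFT) |
	       (divider << CTL_CLOCK_2X_SHIFT);
}

int main(void)
{
	/* 5 bytes, precharge 5, sample divider 113 */
	printf("0x%08x\n", get_aux_send_ctl(1, 5, 5, 113));
	return 0;
}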
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
@@ -403,9 +458,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
- int try, precharge, clock = 0;
+ int try, clock = 0;
bool has_aux_irq = HAS_AUX_IRQ(dev);
- uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
@@ -415,16 +469,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
intel_dp_check_edp(intel_dp);
- if (IS_GEN6(dev))
- precharge = 3;
- else
- precharge = 5;
-
- if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
- timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
- else
- timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
intel_aux_display_runtime_get(dev_priv);
/* Try to wait for any previous AUX channel activity */
@@ -448,7 +492,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
- while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ has_aux_irq,
+ send_bytes,
+ aux_clock_divider);
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
@@ -457,16 +506,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
pack_aux(send + i, send_bytes - i));
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
- timeout |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ I915_WRITE(ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
@@ -637,7 +677,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
int reply_bytes;
int ret;
- ironlake_edp_panel_vdd_on(intel_dp);
+ edp_panel_vdd_on(intel_dp);
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
@@ -740,7 +780,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
ret = -EREMOTEIO;
out:
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ edp_panel_vdd_off(intel_dp, false);
return ret;
}
@@ -812,9 +852,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
- int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ /* Conveniently, the link BW constants become indices with a shift... */
+ int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
int bpp, mode_rate;
- static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
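The shift trick works because each DPCD link-rate constant falls into a successive multiple-of-8 range, so dividing by 8 yields the table index directly. A quick check:

/* The DP link BW constants (0x06, 0x0a, 0x14) shift right by 3 to
 * the indices 0, 1, 2 into the bws[] table above. */
#include <stdio.h>

#define DP_LINK_BW_1_62 0x06	/* 1.62 Gbps per lane */
#define DP_LINK_BW_2_7  0x0a	/* 2.7  Gbps per lane */
#define DP_LINK_BW_5_4  0x14	/* 5.4  Gbps per lane */

int main(void)
{
	printf("%d %d %d\n", DP_LINK_BW_1_62 >> 3,
	       DP_LINK_BW_2_7 >> 3, DP_LINK_BW_5_4 >> 3); /* 0 1 2 */
	return 0;
}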
@@ -1015,16 +1056,16 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
ironlake_set_pll_cpu_edp(intel_dp);
}
-#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
-#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
-#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
@@ -1049,24 +1090,41 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Wait complete\n");
}
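wait_panel_status polls the panel status register until the masked value equals the wanted value, which is why each wait_panel_* helper only differs in its mask/value pair. The idiom in isolation, with read_status() as a hypothetical stand-in for I915_READ(pp_stat_reg):

/* Sketch of the mask/value poll idiom behind wait_panel_status: spin
 * (bounded) until the masked status equals the wanted value. */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_status(void)
{
	static uint32_t fake;
	return fake += 0x1000;	/* pretend hardware makes progress */
}

static int wait_status(uint32_t mask, uint32_t value, int max_tries)
{
	while (max_tries--)
		if ((read_status() & mask) == value)
			return 0;	/* panel reached the state */
	return -1;			/* timed out */
}

int main(void)
{
	printf("%d\n", wait_status(0xf000, 0x8000, 100)); /* prints: 0 */
	return 0;
}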
-static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+static void wait_panel_on(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power on\n");
- ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+static void wait_panel_off(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power off time\n");
- ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
-static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power cycle\n");
- ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+
+ /* When we disable the VDD override bit last, we have to do the manual
+ * wait. */
+ wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
+ intel_dp->panel_power_cycle_delay);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
+ intel_dp->backlight_on_delay);
}
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
+ intel_dp->backlight_off_delay);
+}
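Instead of sleeping the full delay on every transition, the driver timestamps the last power event and later sleeps only whatever portion of the required delay has not yet elapsed. A clock_gettime-based sketch of the semantics behind wait_remaining_ms_from_jiffies:

/* Sketch of the "wait only the remaining time" pattern: record when
 * the event happened, then sleep just the unexpired part of the
 * required delay. */
#include <stdint.h>
#include <time.h>

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void wait_remaining_ms(int64_t timestamp_ms, int delay_ms)
{
	int64_t remaining = timestamp_ms + delay_ms - now_ms();

	if (remaining > 0) {	/* part of the delay is still owed */
		struct timespec req = { remaining / 1000,
					(remaining % 1000) * 1000000 };
		nanosleep(&req, NULL);
	}
}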
/* Read the current pp_control value, unlocking the register if it
* is locked
@@ -1084,7 +1142,7 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
return control;
}
-void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+static void edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1099,15 +1157,15 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_dp->want_panel_vdd = true;
- if (ironlake_edp_have_panel_vdd(intel_dp))
+ if (edp_have_panel_vdd(intel_dp))
return;
intel_runtime_pm_get(dev_priv);
DRM_DEBUG_KMS("Turning eDP VDD on\n");
- if (!ironlake_edp_have_panel_power(intel_dp))
- ironlake_wait_panel_power_cycle(intel_dp);
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
@@ -1122,13 +1180,13 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
/*
* If the panel wasn't on, delay before accessing aux channel
*/
- if (!ironlake_edp_have_panel_power(intel_dp)) {
+ if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP was not running\n");
msleep(intel_dp->panel_power_up_delay);
}
}
-static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1137,7 +1195,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("Turning eDP VDD off\n");
pp = ironlake_get_pp_control(intel_dp);
@@ -1154,24 +1212,24 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
if ((pp & POWER_TARGET_ON) == 0)
- msleep(intel_dp->panel_power_cycle_delay);
+ intel_dp->last_power_cycle = jiffies;
intel_runtime_pm_put(dev_priv);
}
}
-static void ironlake_panel_vdd_work(struct work_struct *__work)
+static void edp_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
-void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
@@ -1181,7 +1239,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
intel_dp->want_panel_vdd = false;
if (sync) {
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
} else {
/*
* Queue the timer to fire a long
@@ -1193,7 +1251,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void intel_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1205,12 +1263,12 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power on\n");
- if (ironlake_edp_have_panel_power(intel_dp)) {
+ if (edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP power already on\n");
return;
}
- ironlake_wait_panel_power_cycle(intel_dp);
+ wait_panel_power_cycle(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
@@ -1228,7 +1286,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- ironlake_wait_panel_on(intel_dp);
+ wait_panel_on(intel_dp);
+ intel_dp->last_power_on = jiffies;
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1237,7 +1296,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void intel_edp_panel_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1249,6 +1308,8 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
+ edp_wait_backlight_off(intel_dp);
+
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
@@ -1259,10 +1320,11 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- ironlake_wait_panel_off(intel_dp);
+ intel_dp->last_power_cycle = jiffies;
+ wait_panel_off(intel_dp);
}
-void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1280,7 +1342,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
- msleep(intel_dp->backlight_on_delay);
+ wait_backlight_on(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
@@ -1292,7 +1354,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
intel_panel_enable_backlight(intel_dp->attached_connector);
}
-void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1312,7 +1374,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- msleep(intel_dp->backlight_off_delay);
+ intel_dp->last_backlight_off = jiffies;
}
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1597,10 +1659,12 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+ uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
@@ -1668,7 +1732,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if (!i915_enable_psr) {
+ if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
@@ -1784,9 +1848,9 @@ static void intel_disable_dp(struct intel_encoder *encoder)
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
- ironlake_edp_backlight_off(intel_dp);
+ intel_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- ironlake_edp_panel_off(intel_dp);
+ intel_edp_panel_off(intel_dp);
/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@ -1816,11 +1880,11 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
- ironlake_edp_panel_vdd_on(intel_dp);
+ edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
@@ -1830,14 +1894,14 @@ static void g4x_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_enable_dp(encoder);
- ironlake_edp_backlight_on(intel_dp);
+ intel_edp_backlight_on(intel_dp);
}
static void vlv_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- ironlake_edp_backlight_on(intel_dp);
+ intel_edp_backlight_on(intel_dp);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -2630,10 +2694,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
+ uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+
+ /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
+ if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
+ training_pattern = DP_TRAINING_PATTERN_3;
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
- DP_TRAINING_PATTERN_2 |
+ training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
@@ -2660,7 +2729,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
- DP_TRAINING_PATTERN_2 |
+ training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
@@ -2676,7 +2745,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
- DP_TRAINING_PATTERN_2 |
+ training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
@@ -2818,6 +2887,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
}
+ /* Training Pattern 3 support */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
+ intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
+ intel_dp->use_tps3 = true;
+ DRM_DEBUG_KMS("Displayport TPS3 supported");
+ } else
+ intel_dp->use_tps3 = false;
+
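The two DPCD reads above combine into a simple capability predicate matching the training-pattern choice made in intel_dp_complete_link_train. A sketch, with local constants mirroring the DPCD field values used here (TPS3_SUPPORTED is bit 6 of MAX_LANE_COUNT):

/* Sketch of the TPS3 decision: DPCD rev >= 1.2 plus the TPS3 bit in
 * MAX_LANE_COUNT selects training pattern 3 for channel equalization,
 * otherwise pattern 2, mirroring the checks above. */
#include <stdbool.h>
#include <stdint.h>

#define DP_DPCD_REV		0x000
#define DP_MAX_LANE_COUNT	0x002
#define DP_TPS3_SUPPORTED	(1 << 6)

enum { TRAINING_PATTERN_2 = 2, TRAINING_PATTERN_3 = 3 };

static int pick_channel_eq_pattern(const uint8_t *dpcd, bool is_hbr2)
{
	bool use_tps3 = dpcd[DP_DPCD_REV] >= 0x12 &&
			(dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED);

	return (is_hbr2 || use_tps3) ? TRAINING_PATTERN_3
				     : TRAINING_PATTERN_2;
}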
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -2841,7 +2918,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- ironlake_edp_panel_vdd_on(intel_dp);
+ edp_panel_vdd_on(intel_dp);
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
@@ -2851,7 +2928,36 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ edp_panel_vdd_off(intel_dp, false);
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ u8 buf[1];
+
+ if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
+ return -EAGAIN;
+
+ if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+ return -ENOTTY;
+
+ if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
+ DP_TEST_SINK_START))
+ return -EAGAIN;
+
+ /* Wait 2 vblanks to be sure we will have the correct CRC value */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
+ return -EAGAIN;
+
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
+ return 0;
}
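A hypothetical debugfs-style caller of the new intel_dp_sink_crc helper might look like the sketch below; the helper name, the little-endian pairing of the six CRC bytes, and the output format are all assumptions, not part of this change:

/* Hypothetical usage sketch: the sink returns six bytes of CRC data
 * starting at DP_TEST_CRC_R_CR; printing them as three 16-bit
 * little-endian words is an assumption about the layout. */
static int print_sink_crc(struct intel_dp *intel_dp, struct seq_file *m)
{
	u8 crc[6];
	int ret = intel_dp_sink_crc(intel_dp, crc);

	if (ret)
		return ret;	/* -EAGAIN: retry, -ENOTTY: unsupported */

	seq_printf(m, "%02x%02x %02x%02x %02x%02x\n",
		   crc[1], crc[0], crc[3], crc[2], crc[5], crc[4]);
	return 0;
}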
static bool
@@ -3295,7 +3401,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
kfree(intel_dig_port);
@@ -3394,6 +3500,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
}
}
+static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->last_power_cycle = jiffies;
+ intel_dp->last_power_on = jiffies;
+ intel_dp->last_backlight_off = jiffies;
+}
+
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
@@ -3516,10 +3629,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
- /* And finally store the new values in the power sequencer. */
+ /*
+ * And finally store the new values in the power sequencer. The
+ * backlight delays are set to 1 because we do manual waits on them. For
+ * T8, even BSpec recommends doing it. For T9, if we don't do this,
+ * we'll end up waiting for the backlight off delay twice: once when we
+ * do the manual sleep, and once when we disable the panel and wait for
+ * the PP_STATUS bit to become zero.
+ */
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
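The register packing with the backlight delays forced to 1 can be shown in isolation; the shift values below are illustrative stand-ins for the real PANEL_*_DELAY_SHIFT definitions, and the delay numbers are samples:

/* Sketch of the power-sequencer register packing above, with the
 * backlight (T8/T9) fields forced to 1 because the driver now waits
 * for those delays in software. Shifts and delays are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define POWER_UP_DELAY_SHIFT	16	/* T1+T3, panel power up */
#define LIGHT_ON_DELAY_SHIFT	0	/* T8, backlight on */
#define LIGHT_OFF_DELAY_SHIFT	16	/* T9, backlight off */
#define POWER_DOWN_DELAY_SHIFT	0	/* T10, panel power down */

int main(void)
{
	uint32_t t1_t3 = 2100, t10 = 500;	/* sample delay values */
	uint32_t pp_on  = (t1_t3 << POWER_UP_DELAY_SHIFT) |
			  (1 << LIGHT_ON_DELAY_SHIFT);
	uint32_t pp_off = (1 << LIGHT_OFF_DELAY_SHIFT) |
			  (t10 << POWER_DOWN_DELAY_SHIFT);

	printf("PP_ON 0x%08x PP_OFF 0x%08x\n", pp_on, pp_off);
	return 0;
}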
@@ -3554,14 +3674,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector)
+ struct intel_connector *intel_connector,
+ struct edp_power_seq *power_seq)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
- struct edp_power_seq power_seq = { 0 };
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
@@ -3569,12 +3689,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-
/* Cache DPCD and EDID for edp. */
- ironlake_edp_panel_vdd_on(intel_dp);
+ edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ edp_panel_vdd_off(intel_dp, false);
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -3588,8 +3706,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
/* We now know it's not a ghost, init power sequence regs. */
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
@@ -3638,9 +3755,22 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
+ struct edp_power_seq power_seq = { 0 };
const char *name = NULL;
int type, error;
+ /* intel_dp vfuncs */
+ if (IS_VALLEYVIEW(dev))
+ intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+
+ intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+
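The per-platform AUX divider callback is chosen once at init and called indirectly from then on (intel_dp_aux_ch iterates until the callback returns 0). The dispatch pattern in isolation, with an illustrative platform enum and sample divider values:

/* Sketch of the function-pointer dispatch set up above: pick the
 * divider callback once at init time, call it indirectly afterwards.
 * The platform enum and divider values are illustrative samples. */
#include <stdint.h>
#include <stdio.h>

enum platform { PLAT_I9XX, PLAT_ILK, PLAT_HSW, PLAT_VLV };

struct dp { uint32_t (*get_aux_clock_divider)(int index); };

static uint32_t vlv_divider(int index)  { return index ? 0 : 100; }
static uint32_t i9xx_divider(int index) { return index ? 0 : 63; }

static void dp_init(struct dp *dp, enum platform plat)
{
	if (plat == PLAT_VLV)
		dp->get_aux_clock_divider = vlv_divider;
	else
		dp->get_aux_clock_divider = i9xx_divider;
}

int main(void)
{
	struct dp dp;

	dp_init(&dp, PLAT_VLV);
	/* callers loop until the callback returns 0 */
	printf("%u %u\n", dp.get_aux_clock_divider(0),
	       dp.get_aux_clock_divider(1));	/* prints: 100 0 */
	return 0;
}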
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
@@ -3669,7 +3799,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
+ edp_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
@@ -3721,18 +3851,23 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
BUG();
}
+ if (is_edp(intel_dp)) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ }
+
error = intel_dp_i2c_init(intel_dp, intel_connector, name);
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
error, port_name(port));
intel_dp->psr_setup_done = false;
- if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+ if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
i2c_del_adapter(&intel_dp->adapter);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
- ironlake_panel_vdd_off_sync(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
drm_sysfs_connector_remove(connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fbfaaba5cc3b..44067bce5e04 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -359,6 +359,8 @@ struct intel_crtc {
bool cursor_visible;
struct intel_crtc_config config;
+ struct intel_crtc_config *new_config;
+ bool new_enabled;
uint32_t ddi_pll_sel;
@@ -485,8 +487,22 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ unsigned long last_power_cycle;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
bool psr_setup_done;
+ bool use_tps3;
struct intel_connector *attached_connector;
+
+ uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+ /*
+ * This function returns the value we have to program the AUX_CTL
+ * register with to kick off an AUX transaction.
+ */
+ uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider);
};
struct intel_digital_port {
@@ -540,6 +556,7 @@ struct intel_unpin_work {
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
+ bool *save_crtc_enabled;
bool fb_changed;
bool mode_changed;
@@ -721,15 +738,14 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
-void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_backlight_on(struct intel_dp *intel_dp);
+void intel_edp_backlight_off(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 39eac9937a4a..d6a8a716018d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -104,7 +104,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
return 0;
out_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
@@ -208,7 +208,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
return 0;
out_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6db0d9d17f47..43872f00822a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -113,7 +113,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
}
static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
- enum transcoder cpu_transcoder)
+ enum transcoder cpu_transcoder,
+ struct drm_i915_private *dev_priv)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -296,7 +297,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
u32 val = I915_READ(ctl_reg);
data_reg = hsw_infoframe_data_reg(type,
- intel_crtc->config.cpu_transcoder);
+ intel_crtc->config.cpu_transcoder,
+ dev_priv);
if (data_reg == 0)
return;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8bcb93a2a9f6..3f3043b4ff26 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -848,8 +848,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
/* use the module option value if specified */
- if (i915_lvds_channel_mode > 0)
- return i915_lvds_channel_mode == 2;
+ if (i915.lvds_channel_mode > 0)
+ return i915.lvds_channel_mode == 2;
if (dmi_check_system(intel_dual_link_lvds))
return true;
@@ -1036,7 +1036,7 @@ void intel_lvds_init(struct drm_device *dev)
intel_find_panel_downclock(dev,
fixed_mode, connector);
if (intel_connector->panel.downclock_mode !=
- NULL && i915_lvds_downclock) {
+ NULL && i915.lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = true;
dev_priv->lvds_downclock =
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a759ecdb7a6e..424f0946d8c4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
struct drm_i915_gem_object *obj = overlay->old_vid_bo;
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
overlay->old_vid_bo = NULL;
@@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL;
@@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
return 0;
out_unpin:
- i915_gem_object_unpin(new_bo);
+ i915_gem_object_ggtt_unpin(new_bo);
return ret;
}
@@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo:
if (!OVERLAY_NEEDS_PHYSICAL(dev))
- i915_gem_object_unpin(reg_bo);
+ i915_gem_object_ggtt_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 350de359123a..f1ee2c4d282e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -33,8 +33,6 @@
#include <linux/moduleparam.h>
#include "intel_drv.h"
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
void
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -325,13 +323,6 @@ out:
pipe_config->gmch_pfit.lvds_border_bits = border;
}
-static int i915_panel_invert_brightness;
-MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
- "(-1 force normal, 0 machine defaults, 1 force inversion), please "
- "report PCI device ID, subsystem vendor and subsystem device ID "
- "to dri-devel@lists.freedesktop.org, if your machine needs it. "
- "It will then be included in an upcoming module version.");
-module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -341,10 +332,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
WARN_ON(panel->backlight.max == 0);
- if (i915_panel_invert_brightness < 0)
+ if (i915.invert_brightness < 0)
return val;
- if (i915_panel_invert_brightness > 0 ||
+ if (i915.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val;
}
@@ -810,13 +801,13 @@ intel_panel_detect(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+ if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}
- switch (i915_panel_ignore_lid) {
+ switch (i915.panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d77cc81900f9..f74d7f506aa9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -97,7 +97,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
- int plane, i;
+ int i;
u32 fbc_ctl;
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
@@ -109,7 +109,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
- plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
@@ -120,7 +119,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
+ fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
}
@@ -135,7 +134,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
@@ -154,17 +153,19 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
u32 dpfc_ctl;
- dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
/* enable it... */
- I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
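All three FBC enable paths touched here now derive the compression limit from the framebuffer's bytes-per-pixel instead of hardcoding 1X. The shared decision in isolation; the flag values are illustrative, not the real DPFC_CTL_* bits:

/* Sketch of the shared FBC control computation introduced above:
 * 16bpp framebuffers (2 bytes per pixel) get the 2X compression
 * limit, everything else 1X. Flag values are illustrative. */
#include <stdint.h>

#define CTL_LIMIT_1X	(0u << 6)
#define CTL_LIMIT_2X	(1u << 6)
#define CTL_PLANE(p)	((uint32_t)(p) << 30)

static uint32_t fbc_ctl_for(int plane, int bytes_per_pixel)
{
	uint32_t ctl = CTL_PLANE(plane);

	if (bytes_per_pixel == 2)	/* e.g. RGB565 */
		ctl |= CTL_LIMIT_2X;
	else
		ctl |= CTL_LIMIT_1X;
	return ctl;
}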
@@ -224,18 +225,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
u32 dpfc_ctl;
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= DPFC_RESERVED;
- dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- /* Set persistent mode for front-buffer rendering, ala X. */
- dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
@@ -282,12 +281,16 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
- I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
+ dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
- I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
- IVB_DPFC_CTL_FENCE_EN |
- intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
@@ -466,7 +469,7 @@ void intel_update_fbc(struct drm_device *dev)
return;
}
- if (!i915_powersave) {
+ if (!i915.powersave) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
@@ -505,13 +508,13 @@ void intel_update_fbc(struct drm_device *dev)
obj = intel_fb->obj;
adjusted_mode = &intel_crtc->config.adjusted_mode;
- if (i915_enable_fbc < 0 &&
+ if (i915.enable_fbc < 0 &&
INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
}
- if (!i915_enable_fbc) {
+ if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
@@ -1886,7 +1889,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
}
/* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
+static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
{
/* max that registers can hold */
if (INTEL_INFO(dev)->gen >= 8)
@@ -1895,7 +1898,7 @@ static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
return 15;
}
-static void ilk_compute_wm_maximums(struct drm_device *dev,
+static void ilk_compute_wm_maximums(const struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
@@ -1948,7 +1951,7 @@ static bool ilk_validate_wm_level(int level,
return ret;
}
-static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
int level,
const struct ilk_pipe_wm_parameters *p,
struct intel_wm_level *result)
@@ -2140,7 +2143,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
struct intel_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
@@ -2753,7 +2756,7 @@ intel_alloc_context_page(struct drm_device *dev)
return ctx;
err_unpin:
- i915_gem_object_unpin(ctx);
+ i915_gem_object_ggtt_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
return NULL;
@@ -3000,6 +3003,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
dev_priv->rps.last_adj = 0;
}
+/* gen6_set_rps is called to update the frequency request, but should also be
+ * called when the range (min_delay and max_delay) is modified so that we can
+ * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3008,8 +3014,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
- if (val == dev_priv->rps.cur_delay)
+ if (val == dev_priv->rps.cur_delay) {
+ /* min/max delay may still have been modified, so be sure to
+ * write the limits value. */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ gen6_rps_limits(dev_priv, val));
+
return;
+ }
gen6_set_rps_thresholds(dev_priv, val);
@@ -3035,6 +3047,58 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(val * 50);
}
+/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
+ *
+ * If Gfx is idle, then
+ * 1. Mask Turbo interrupts
+ * 2. Bring up Gfx clock
+ * 3. Change the freq to Rpn and wait till P-Unit updates freq
+ * 4. Clear the Force GFX CLK ON bit so that Gfx can power down
+ * 5. Unmask Turbo interrupts
+ */
+static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+{
+ /*
+ * When we are idle, drop to the minimum voltage state.
+ */
+
+ if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+ return;
+
+ /* Mask turbo interrupts so that they will not come in between */
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+
+ /* Bring up the Gfx clock */
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
+ VLV_GFX_CLK_FORCE_ON_BIT);
+
+ if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
+ DRM_ERROR("GFX_CLK_ON request timed out\n");
+ return;
+ }
+
+ dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
+ dev_priv->rps.min_delay);
+
+ if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
+ & GENFREQSTATUS) == 0, 5))
+ DRM_ERROR("timed out waiting for Punit\n");
+
+ /* Release the Gfx clock */
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
+ ~VLV_GFX_CLK_FORCE_ON_BIT);
+
+ /* Unmask Up interrupts */
+ dev_priv->rps.rp_up_masked = true;
+ gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
+ dev_priv->rps.min_delay);
+}
+
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -3042,7 +3106,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
dev_priv->rps.last_adj = 0;
@@ -3151,8 +3215,8 @@ int intel_enable_rc6(const struct drm_device *dev)
return 0;
/* Respect the kernel parameter if it is set */
- if (i915_enable_rc6 >= 0)
- return i915_enable_rc6;
+ if (i915.enable_rc6 >= 0)
+ return i915.enable_rc6;
/* Disable RC6 on Ironlake */
if (INTEL_INFO(dev)->gen == 5)
@@ -3267,7 +3331,7 @@ static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- u32 rp_state_cap;
+ u32 rp_state_cap, hw_max, hw_min;
u32 gt_perf_status;
u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
@@ -3296,13 +3360,20 @@ static void gen6_enable_rps(struct drm_device *dev)
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
/* In units of 50MHz */
- dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
- dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+ dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+ hw_min = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
dev_priv->rps.cur_delay = 0;
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_delay == 0)
+ dev_priv->rps.max_delay = hw_max;
+
+ if (dev_priv->rps.min_delay == 0)
+ dev_priv->rps.min_delay = hw_min;
+
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3531,7 +3602,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- u32 gtfifodbg, val, rc6_mode = 0;
+ u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0;
int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3593,21 +3664,27 @@ static void valleyview_enable_rps(struct drm_device *dev)
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay);
- dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
- dev_priv->rps.hw_max = dev_priv->rps.max_delay;
+ dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
- dev_priv->rps.max_delay);
+ vlv_gpu_freq(dev_priv, hw_max),
+ hw_max);
dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
- dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
+ hw_min = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
- dev_priv->rps.min_delay);
+ vlv_gpu_freq(dev_priv, hw_min),
+ hw_min);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_delay == 0)
+ dev_priv->rps.max_delay = hw_max;
+
+ if (dev_priv->rps.min_delay == 0)
+ dev_priv->rps.min_delay = hw_min;
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
@@ -3615,6 +3692,9 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+ dev_priv->rps.rp_up_masked = false;
+ dev_priv->rps.rp_down_masked = false;
+
gen6_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
@@ -3625,13 +3705,13 @@ void ironlake_teardown_rc6(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->ips.renderctx) {
- i915_gem_object_unpin(dev_priv->ips.renderctx);
+ i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
dev_priv->ips.renderctx = NULL;
}
if (dev_priv->ips.pwrctx) {
- i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
dev_priv->ips.pwrctx = NULL;
}
@@ -4270,6 +4350,7 @@ void intel_gpu_ips_teardown(void)
i915_mch_dev = NULL;
spin_unlock_irq(&mchdev_lock);
}
+
static void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4605,11 +4686,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
- * Also apply WaDisableVDSUnitClockGating:snb and
- * WaDisableRCPBUnitClockGating:snb.
+ * WaDisableRCCUnitClockGating:snb
+ * WaDisableRCPBUnitClockGating:snb
*/
I915_WRITE(GEN6_UCGCTL2,
- GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
@@ -4655,14 +4735,17 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
reg &= ~GEN7_FF_SCHED_MASK;
reg |= GEN7_FF_TS_SCHED_HW;
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
- if (IS_HASWELL(dev_priv->dev))
- reg &= ~GEN7_FF_VS_REF_CNT_FFME;
-
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
@@ -4709,8 +4792,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
/* FIXME(BDW): Check all the w/a, some might only apply to
* pre-production hw. */
- WARN(!i915_preliminary_hw_support,
- "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
+ /*
+ * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
+ * pre-production hardware
+ */
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
I915_WRITE(HALF_SLICE_CHICKEN3,
@@ -4761,21 +4846,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
ilk_init_lp_watermarks(dev);
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating:hsw workaround.
- */
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode:hsw */
- I915_WRITE(GEN7_L3CNTLREG1,
- GEN7_WA_FOR_GEN7_L3_CONTROL);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
-
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
I915_WRITE(HSW_ROW_CHICKEN3,
@@ -4787,7 +4857,12 @@ static void haswell_init_clock_gating(struct drm_device *dev)
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaVSRefCountFullforceMissDisable:hsw */
- gen7_setup_fixed_func_scheduler(dev_priv);
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
+
+ /* enable HiZ Raw Stall Optimization */
+ I915_WRITE(CACHE_MODE_0_GEN7,
+ _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
/* WaDisable4x2SubspanOptimization:hsw */
I915_WRITE(CACHE_MODE_1,
@@ -4825,9 +4900,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
- else
- I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
- _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -4841,31 +4913,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- else
+ else {
+ /* must write both registers */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-
+ }
/* WaForceL3Serialization:ivb */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- *
+ /*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
I915_WRITE(GEN6_UCGCTL2,
- GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue:ivb */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -4874,9 +4939,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
g4x_disable_trickle_feed(dev);
- /* WaVSRefCountFullforceMissDisable:ivb */
gen7_setup_fixed_func_scheduler(dev_priv);
+ /* enable HiZ Raw Stall Optimization */
+ I915_WRITE(CACHE_MODE_0_GEN7,
+ _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+
/* WaDisable4x2SubspanOptimization:ivb */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
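
CACHE_MODE_1, GEN7_ROW_CHICKEN2 and the other chicken registers written with _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE are masked registers: the upper 16 bits of the written value select which of the lower 16 bits actually change, so each workaround can be applied independently, without a read-modify-write. A standalone sketch of the macro pair (as defined in i915_reg.h in this era) plus the hardware-side update rule:

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* mask half + value half */
#define MASKED_BIT_DISABLE(a)	((a) << 16)		/* mask half only */

/* Hardware side: only bits whose mask half is set are updated. */
static uint16_t masked_write(uint16_t cur, uint32_t val)
{
	uint16_t mask = val >> 16;

	return (cur & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x0101;	/* pretend current register contents */

	reg = masked_write(reg, MASKED_BIT_ENABLE(0x0040));
	reg = masked_write(reg, MASKED_BIT_DISABLE(0x0001));
	printf("0x%04x\n", (unsigned int)reg); /* 0x0140: bit 0 off, bit 6 on, rest kept */
	return 0;
}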
@@ -4927,18 +4995,14 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ /* WaPsdDispatchEnable:vlv */
/* WaDisablePSDDualDispatchEnable:vlv */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
- /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode:vlv */
+ /* WaDisableL3CacheAging:vlv */
I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
@@ -4953,51 +5017,39 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- *
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
- *
- * Also apply WaDisableVDSUnitClockGating:vlv and
- * WaDisableRCPBUnitClockGating:vlv.
*/
I915_WRITE(GEN6_UCGCTL2,
- GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
- GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableL3Bank2xClockGate:vlv */
I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
+ * WaIncreaseL3CreditsForVLVB0:vlv
+ * This is actually the hardware default.
+ */
+ I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+
+ /*
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
-
- /* Conservative clock gating settings for now */
- I915_WRITE(0x9400, 0xffffffff);
- I915_WRITE(0x9404, 0xffffffff);
- I915_WRITE(0x9408, 0xffffffff);
- I915_WRITE(0x940c, 0xffffffff);
- I915_WRITE(0x9410, 0xffffffff);
- I915_WRITE(0x9414, 0xffffffff);
- I915_WRITE(0x9418, 0xffffffff);
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -5272,7 +5324,7 @@ static void __intel_power_well_put(struct drm_device *dev,
WARN_ON(!power_well->count);
if (!--power_well->count && power_well->set &&
- i915_disable_power_well) {
+ i915.disable_power_well) {
power_well->set(dev, power_well, false);
hsw_enable_package_c8(dev_priv);
}
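
The __intel_power_well_put hunk is plain refcounting: the well is physically switched off only when the last user drops its reference, and only if the i915.disable_power_well module parameter permits it. A hypothetical standalone sketch of the get/put pair (names invented for illustration, not driver functions):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct power_well {
	int count;
	bool enabled;
};

static bool disable_power_well = true;	/* stand-in for i915.disable_power_well */

static void power_well_get(struct power_well *pw)
{
	if (pw->count++ == 0)
		pw->enabled = true;	/* first user powers it up */
}

static void power_well_put(struct power_well *pw)
{
	assert(pw->count > 0);		/* cf. WARN_ON(!power_well->count) */
	if (--pw->count == 0 && disable_power_well)
		pw->enabled = false;	/* last user powers it down */
}

int main(void)
{
	struct power_well pw = { 0 };

	power_well_get(&pw);
	power_well_get(&pw);
	power_well_put(&pw);		/* still one user: stays on */
	printf("%d\n", pw.enabled);	/* 1 */
	power_well_put(&pw);
	printf("%d\n", pw.enabled);	/* 0 */
	return 0;
}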
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31b36c5ac894..8c1c0bc3e630 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -549,7 +549,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
return 0;
err_unpin:
- i915_gem_object_unpin(ring->scratch.obj);
+ i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
drm_gem_object_unreference(&ring->scratch.obj->base);
err:
@@ -625,7 +625,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 5) {
kunmap(sg_page(ring->scratch.obj->pages->sgl));
- i915_gem_object_unpin(ring->scratch.obj);
+ i915_gem_object_ggtt_unpin(ring->scratch.obj);
}
drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -1253,7 +1253,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
return;
kunmap(sg_page(obj->pages->sgl));
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
}
@@ -1293,7 +1293,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
err:
@@ -1390,7 +1390,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
err_unmap:
iounmap(ring->virtual_start);
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_ggtt_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
ring->obj = NULL;
@@ -1418,7 +1418,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
iounmap(ring->virtual_start);
- i915_gem_object_unpin(ring->obj);
+ i915_gem_object_ggtt_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
ring->preallocated_lazy_request = NULL;
@@ -1430,28 +1430,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
cleanup_status_page(ring);
}
-static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
-{
- int ret;
-
- ret = i915_wait_seqno(ring, seqno);
- if (!ret)
- i915_gem_retire_requests_ring(ring);
-
- return ret;
-}
-
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
struct drm_i915_gem_request *request;
- u32 seqno = 0;
+ u32 seqno = 0, tail;
int ret;
- i915_gem_retire_requests_ring(ring);
-
if (ring->last_retired_head != -1) {
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
+
ring->space = ring_space(ring);
if (ring->space >= n)
return 0;
@@ -1468,6 +1456,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
space += ring->size;
if (space >= n) {
seqno = request->seqno;
+ tail = request->tail;
break;
}
@@ -1482,15 +1471,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (seqno == 0)
return -ENOSPC;
- ret = intel_ring_wait_seqno(ring, seqno);
+ ret = i915_wait_seqno(ring, seqno);
if (ret)
return ret;
- if (WARN_ON(ring->last_retired_head == -1))
- return -ENOSPC;
-
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
+ ring->head = tail;
ring->space = ring_space(ring);
if (WARN_ON(ring->space < n))
return -ENOSPC;
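
The intel_ring_wait_request rewrite drops the separate wait-and-retire helper: it records the tail of the request it is about to wait on and sets ring->head from that directly, instead of depending on last_retired_head having been updated by retirement. The free-space arithmetic underneath is the wrap-aware computation this file uses throughout; a standalone sketch, where the small reserve (kept so a full ring never has head == tail) is illustrative:

#include <stdio.h>

#define RING_SIZE	4096
#define RING_FREE_RSVD	8	/* illustrative reserve, cf. I915_RING_FREE_SPACE */

/* Free bytes between tail (producer) and head (consumer), wrap-aware. */
static int ring_space(int head, int tail)
{
	int space = head - (tail + RING_FREE_RSVD);

	if (space < 0)
		space += RING_SIZE;
	return space;
}

int main(void)
{
	/* head behind tail: free space wraps around the end of the buffer */
	printf("%d\n", ring_space(100, 4000));	/* 188 */
	/* head ahead of tail: free space is the gap between them */
	printf("%d\n", ring_space(3000, 100));	/* 2892 */
	return 0;
}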
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0b243ce33714..08b91c6ac70a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -41,6 +41,8 @@ enum intel_ring_hangcheck_action {
HANGCHECK_HUNG,
};
+#define HANGCHECK_SCORE_RING_HUNG 31
+
struct intel_ring_hangcheck {
bool deadlock;
u32 seqno;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 716a3c9c0751..336ae6c602f2 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -124,9 +124,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
crtc_w--;
crtc_h--;
- I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
- I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
-
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
obj->tiling_mode,
@@ -134,6 +131,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+ I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
else
@@ -293,15 +293,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -472,15 +472,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
- I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
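
All three update_plane hunks above move the STRIDE/POS writes (SPSTRIDE/SPPOS, SPRSTRIDE/SPRPOS, DVSSTRIDE/DVSPOS) below the offset computation, so every register write for a plane update lands in one contiguous block. The offset math splits a pixel position into a page-aligned surface base plus a small residual that the LINOFF-style register takes in the untiled case. intel_gen4_compute_page_offset also folds the tiling layout into this adjustment; the sketch below covers only the linear case, with a 4 KiB alignment assumed for illustration:

#include <stdio.h>

#define PAGE_ALIGN_MASK 4095u	/* assume 4 KiB alignment for the surface base */

int main(void)
{
	unsigned int x = 37, y = 113;		/* source position in pixels */
	unsigned int pitch = 8192, cpp = 4;	/* bytes per row, bytes per pixel */

	unsigned int linear = y * pitch + x * cpp;
	/* Page-aligned base that the surface address register can take... */
	unsigned int surf = linear & ~PAGE_ALIGN_MASK;
	/* ...and the residual programmed separately. */
	linear -= surf;

	printf("surf=0x%x linear=0x%x\n", surf, linear); /* surf=0xe2000 linear=0x94 */
	return 0;
}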
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 87df68f5f504..c62841404c82 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -852,6 +852,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
+ struct i915_hw_context *ctx;
int ret;
if (args->flags || args->pad)
@@ -864,11 +865,12 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (ret)
return ret;
- hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
- if (IS_ERR(hs)) {
+ ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(hs);
+ return PTR_ERR(ctx);
}
+ hs = &ctx->hang_stats;
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
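
The reset-stats hunk switches from a helper that returned the hang stats to looking up the context itself, and its error handling rests on the kernel's ERR_PTR convention: a failing lookup encodes a negative errno into the returned pointer value, which IS_ERR detects and PTR_ERR decodes, avoiding a separate out-parameter. A userspace re-implementation of the convention, for illustration only (the context struct and lookup are invented):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* Errnos live in the top MAX_ERRNO values of the address space. */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct ctx { int hang_count; };

/* Hypothetical lookup: valid id returns a context, anything else an errno. */
static struct ctx *ctx_lookup(int id)
{
	static struct ctx default_ctx;

	if (id != 0)
		return ERR_PTR(-ENOENT);
	return &default_ctx;
}

int main(void)
{
	struct ctx *c = ctx_lookup(42);

	if (IS_ERR(c)) {
		printf("lookup failed: %ld\n", PTR_ERR(c)); /* -2 (ENOENT) */
		return 1;
	}
	return 0;
}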