Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig                      |   2
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c                |   2
-rw-r--r--  drivers/gpu/drm/drm_edid.c                   |   3
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c              |   2
-rw-r--r--  drivers/gpu/drm/drm_gem.c                    |  13
-rw-r--r--  drivers/gpu/drm/drm_mm.c                     |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c              | 170
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              |  92
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              |  42
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h              |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c             |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         | 276
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c              |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c            |  11
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c            |   8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c       |  12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c       | 206
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h       |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c         |  12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c    |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  |  36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c        |  76
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c         |   8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c        |  14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h        |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c      |  40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h      |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c        |  22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c      |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c        | 162
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c        |  15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c   |  13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c     |   3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h        |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c      |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c      |  50
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c         |   9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c       |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c          |  33
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c         |   9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c          |  11
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c         |  13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c           |  14
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig               |  12
-rw-r--r--  drivers/gpu/drm/radeon/atom.c                | 107
-rw-r--r--  drivers/gpu/drm/radeon/atom.h                |   1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c       | 259
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c         |  37
-rw-r--r--  drivers/gpu/drm/radeon/r100.c                |  19
-rw-r--r--  drivers/gpu/drm/radeon/r200.c                |   7
-rw-r--r--  drivers/gpu/drm/radeon/r300.c                |  16
-rw-r--r--  drivers/gpu/drm/radeon/r420.c                |   7
-rw-r--r--  drivers/gpu/drm/radeon/r520.c                |   3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                | 180
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c          |   5
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c       |  21
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c             |  83
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h               |  25
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h              |  20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c          |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h         |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c     |   9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c    |  55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c       |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c      |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c   |  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c           |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c       |   1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c      |  56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c     | 165
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c           |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c          |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c  |  77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h         |  30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c       |   3
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r200         |   2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c               |  28
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c               |   2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c               |   2
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c               |   4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c               |  76
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                 |  75
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c            |   9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c               |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c             |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                 |  23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c       |  25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c          |  76
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h          |  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c      |  19
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c           |   8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c         |  37
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c        |   8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c          |  13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c          |  30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c      |   9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c     |  64
100 files changed, 2097 insertions(+), 1133 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd17e050..305c59003963 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -66,6 +66,8 @@ config DRM_RADEON
If M is selected, the module will be called radeon.
+source "drivers/gpu/drm/radeon/Kconfig"
+
config DRM_I810
tristate "Intel I810"
depends on DRM && AGP && AGP_INTEL
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
- gart_info->table_mask);
+ (unsigned long long)gart_info->table_mask);
ret = 1;
goto done;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index defcaf108460..f665b05592f3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -633,8 +633,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "integrated sync not supported\n");
- return NULL;
+ printk(KERN_WARNING "composite sync not supported\n");
}
/* it is incorrect if hsync/vsync width is zero */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1c2b7d44ec05..0f9e90552dc4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..8bf3770f294e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
if (IS_ERR(obj->filp))
goto free;
- /* Basically we want to disable the OOM killer and handle ENOMEM
- * ourselves by sacrificing pages from cached buffers.
- * XXX shmem_file_[gs]et_gfp_mask()
- */
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_FS |
- __GFP_RECLAIMABLE |
- __GFP_NORETRY |
- __GFP_NOWARN |
- __GFP_NOMEMALLOC);
-
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
wasted += alignment - tmp;
}
- if (entry->size >= size + wasted) {
+ if (entry->size >= size + wasted &&
+ (entry->start + wasted + size) <= end) {
if (!best_match)
return entry;
if (entry->size < best_size) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dceb..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
obj = obj_priv->obj;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
DRM_ERROR("Failed to get pages: %d\n", ret);
spin_unlock(&dev_priv->mm.active_list_lock);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto fail_batch_free;
+ }
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..79beffcf5936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
const static struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
- .has_pipe_cxsr = 1,
+ .need_gfx_hws = 1,
.has_hotplug = 1,
};
@@ -174,78 +174,100 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev || !dev_priv) {
- DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (i915_gem_idle(dev))
+ int error = i915_gem_idle(dev);
+ if (error) {
dev_err(&dev->pdev->dev,
- "GEM idle failed, resume may fail\n");
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
drm_irq_uninstall(dev);
}
i915_save_state(dev);
+ return 0;
+}
+
+static void i915_drm_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
intel_opregion_free(dev, 1);
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+ int error;
+
+ if (!dev || !dev->dev_private) {
+ DRM_ERROR("dev: %p\n", dev);
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+ error = i915_drm_freeze(dev);
+ if (error)
+ return error;
+
+ i915_drm_suspend(dev);
+
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
-
return 0;
}
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- if (pci_enable_device(dev->pdev))
- return -1;
- pci_set_master(dev->pdev);
-
- i915_restore_state(dev);
-
- intel_opregion_init(dev, 1);
+ int error = 0;
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
- if (ret != 0)
- ret = -1;
+ error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
- }
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
}
dev_priv->modeset_on_lid = 0;
- return ret;
+ return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ pci_set_master(dev->pdev);
+
+ i915_restore_state(dev);
+
+ intel_opregion_init(dev, 1);
+
+ return i915_drm_thaw(dev);
}
/**
@@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int error;
- return i915_suspend(dev, state);
-}
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ error = i915_drm_freeze(drm_dev);
+ if (error)
+ return error;
- return i915_resume(dev);
-}
+ i915_drm_suspend(drm_dev);
-static int
-i915_pm_suspend(struct device *dev)
-{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
-}
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
-static int
-i915_pm_resume(struct device *dev)
-{
- return i915_pci_resume(to_pci_dev(dev));
+ return 0;
}
-static int
-i915_pm_freeze(struct device *dev)
+static int i915_pm_resume(struct device *dev)
{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_resume(drm_dev);
}
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
{
- /* thaw during hibernate, do nothing! */
- return 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ return i915_drm_freeze(drm_dev);
}
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw(drm_dev);
}
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
{
- return i915_pci_resume(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int error;
+
+ error = i915_drm_freeze(drm_dev);
+ if (!error)
+ i915_drm_suspend(drm_dev);
+
+ return error;
}
const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = {
.freeze = i915_pm_freeze,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
- .restore = i915_pm_restore,
+ .restore = i915_pm_resume,
};
static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c1669488b5a..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
struct list_head flushing_list;
/**
+ * List of objects currently pending a GPU write flush.
+ *
+ * All elements on this list will belong to either the
+ * active_list or flushing_list, last_rendering_seqno can
+ * be used to differentiate between the two elements.
+ */
+ struct list_head gpu_write_list;
+
+ /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
+ /** This object's place on GPU write list */
+ struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;
@@ -872,7 +883,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c67924ca80c..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
goto fail_unlock;
@@ -321,40 +321,24 @@ fail_unlock:
return ret;
}
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
- return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
int ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
/* If we've insufficient memory to map in the pages, attempt
* to make some space by throwing out some old buffers.
*/
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
- gfp_t gfp;
ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
- gfp = i915_gem_object_get_page_gfp_mask(obj);
- i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
- ret = i915_gem_object_get_pages(obj);
- i915_gem_object_set_page_gfp_mask (obj, gfp);
+ ret = i915_gem_object_get_pages(obj, 0);
}
return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
goto fail_unlock;
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -2100,8 +2088,8 @@ static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno;
int ret;
+ uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@@ -2123,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
@@ -2230,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
}
int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count, i;
@@ -2256,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
inode = obj->filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
- page = read_mapping_page(mapping, i, NULL);
+ page = read_cache_page_gfp(mapping, i,
+ mapping_gfp_mask (mapping) |
+ __GFP_COLD |
+ gfpmask);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
i915_gem_object_put_pages(obj);
@@ -2579,7 +2573,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- bool retry_alloc = false;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
int ret;
if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
- }
- ret = i915_gem_object_get_pages(obj);
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
- }
+ ret = i915_gem_object_get_pages(obj, gfpmask);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
@@ -2641,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev, obj->size);
if (ret) {
/* now try to shrink everyone else */
- if (! retry_alloc) {
- retry_alloc = true;
- goto search_free;
+ if (gfpmask) {
+ gfpmask = 0;
+ goto search_free;
}
return ret;
@@ -2721,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
- obj->write_domain = 0;
+ BUG_ON(obj->write_domain);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -3584,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t reloc_count = 0, i;
int ret = 0;
+ if (relocs == NULL)
+ return 0;
+
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
int unwritten;
@@ -3673,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_relocation_entry *relocs;
+ struct drm_i915_gem_relocation_entry *relocs = NULL;
int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
@@ -3699,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto pre_mutex_err;
+ }
ret = copy_from_user(cliprects,
(struct drm_clip_rect __user *)
@@ -3742,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3750,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3863,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
(void)i915_add_request(dev, file_priv,
dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ if (obj->write_domain)
+ list_move_tail(&obj_priv->gpu_write_list,
+ &dev_priv->mm.gpu_write_list);
+ else
+ list_del_init(&obj_priv->gpu_write_list);
+
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
@@ -3946,6 +3948,7 @@ err:
mutex_unlock(&dev->struct_mutex);
+pre_mutex_err:
/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@@ -3960,7 +3963,6 @@ err:
ret = ret2;
}
-pre_mutex_err:
drm_free_large(object_list);
kfree(cliprects);
@@ -4383,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
+ INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
@@ -4834,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4946,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
if (!obj_priv->phys_obj)
return;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret)
goto out;
@@ -5004,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj_priv->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
DRM_ERROR("failed to get page list\n");
goto out;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
if (de_iir & DE_GSE)
ironlake_opregion_gse_intr(dev);
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ }
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
/* check event from PCH */
if ((de_iir & DE_PCH_EVENT) &&
(pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
- if (IS_IRONLAKE(dev))
- return 0;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_I965G(dev))
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- if (IS_IRONLAKE(dev))
- return;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ if (IS_IRONLAKE(dev))
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask;
+ dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
/* should always can generate irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
#define IRONLAKE_DOT_MAX 350000
#define IRONLAKE_VCO_MIN 1760000
#define IRONLAKE_VCO_MAX 3510000
-#define IRONLAKE_N_MIN 1
-#define IRONLAKE_N_MAX 6
-#define IRONLAKE_M_MIN 79
-#define IRONLAKE_M_MAX 127
#define IRONLAKE_M1_MIN 12
#define IRONLAKE_M1_MAX 22
#define IRONLAKE_M2_MIN 5
#define IRONLAKE_M2_MAX 9
-#define IRONLAKE_P_SDVO_DAC_MIN 5
-#define IRONLAKE_P_SDVO_DAC_MAX 80
-#define IRONLAKE_P_LVDS_MIN 28
-#define IRONLAKE_P_LVDS_MAX 112
-#define IRONLAKE_P1_MIN 1
-#define IRONLAKE_P1_MAX 8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
-#define IRONLAKE_P_DISPLAY_PORT_MIN 10
-#define IRONLAKE_P_DISPLAY_PORT_MAX 20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN 1
+#define IRONLAKE_DAC_N_MAX 5
+#define IRONLAKE_DAC_M_MIN 79
+#define IRONLAKE_DAC_M_MAX 127
+#define IRONLAKE_DAC_P_MIN 5
+#define IRONLAKE_DAC_P_MAX 80
+#define IRONLAKE_DAC_P1_MIN 1
+#define IRONLAKE_DAC_P1_MAX 8
+#define IRONLAKE_DAC_P2_SLOW 10
+#define IRONLAKE_DAC_P2_FAST 5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN 1
+#define IRONLAKE_LVDS_S_N_MAX 3
+#define IRONLAKE_LVDS_S_M_MIN 79
+#define IRONLAKE_LVDS_S_M_MAX 118
+#define IRONLAKE_LVDS_S_P_MIN 28
+#define IRONLAKE_LVDS_S_P_MAX 112
+#define IRONLAKE_LVDS_S_P1_MIN 2
+#define IRONLAKE_LVDS_S_P1_MAX 8
+#define IRONLAKE_LVDS_S_P2_SLOW 14
+#define IRONLAKE_LVDS_S_P2_FAST 14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN 1
+#define IRONLAKE_LVDS_D_N_MAX 3
+#define IRONLAKE_LVDS_D_M_MIN 79
+#define IRONLAKE_LVDS_D_M_MAX 127
+#define IRONLAKE_LVDS_D_P_MIN 14
+#define IRONLAKE_LVDS_D_P_MAX 56
+#define IRONLAKE_LVDS_D_P1_MIN 2
+#define IRONLAKE_LVDS_D_P1_MAX 8
+#define IRONLAKE_LVDS_D_P2_SLOW 7
+#define IRONLAKE_LVDS_D_P2_FAST 7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN 1
+#define IRONLAKE_LVDS_S_SSC_N_MAX 2
+#define IRONLAKE_LVDS_S_SSC_M_MIN 79
+#define IRONLAKE_LVDS_S_SSC_M_MAX 126
+#define IRONLAKE_LVDS_S_SSC_P_MIN 28
+#define IRONLAKE_LVDS_S_SSC_P_MAX 112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN 1
+#define IRONLAKE_LVDS_D_SSC_N_MAX 3
+#define IRONLAKE_LVDS_D_SSC_M_MIN 79
+#define IRONLAKE_LVDS_D_SSC_M_MAX 126
+#define IRONLAKE_LVDS_D_SSC_P_MIN 14
+#define IRONLAKE_LVDS_D_SSC_P_MAX 42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN 1
+#define IRONLAKE_DP_N_MAX 2
+#define IRONLAKE_DP_M_MIN 81
+#define IRONLAKE_DP_M_MAX 90
+#define IRONLAKE_DP_P_MIN 10
+#define IRONLAKE_DP_P_MAX 20
+#define IRONLAKE_DP_P2_FAST 10
+#define IRONLAKE_DP_P2_SLOW 10
+#define IRONLAKE_DP_P2_LIMIT 0
+#define IRONLAKE_DP_P1_MIN 1
+#define IRONLAKE_DP_P1_MAX 2
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
.find_pll = intel_find_best_PLL,
};
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+ .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+ .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
- .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+ .p2_slow = IRONLAKE_DAC_P2_SLOW,
+ .p2_fast = IRONLAKE_DAC_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_LVDS_SLOW,
- .p2_fast = IRONLAKE_P2_LVDS_FAST },
+ .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN,
.max = IRONLAKE_VCO_MAX},
- .n = { .min = IRONLAKE_N_MIN,
- .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN,
- .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_DP_N_MIN,
+ .max = IRONLAKE_DP_N_MAX },
+ .m = { .min = IRONLAKE_DP_M_MIN,
+ .max = IRONLAKE_DP_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN,
.max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN,
.max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
- .max = IRONLAKE_P_DISPLAY_PORT_MAX },
- .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
- .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
- .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
- .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
- .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+ .p = { .min = IRONLAKE_DP_P_MIN,
+ .max = IRONLAKE_DP_P_MAX },
+ .p1 = { .min = IRONLAKE_DP_P1_MIN,
+ .max = IRONLAKE_DP_P1_MAX},
+ .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+ .p2_slow = IRONLAKE_DP_P2_SLOW,
+ .p2_fast = IRONLAKE_DP_P2_FAST },
.find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_ironlake_lvds;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ int refclk = 120;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+ refclk = 100;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP) {
+ /* LVDS dual channel */
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
HAS_eDP)
limit = &intel_limits_ironlake_display_port;
else
- limit = &intel_limits_ironlake_sdvo;
+ limit = &intel_limits_ironlake_dac;
return limit;
}
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ drm_vblank_off(dev, pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = roundup(sr_entries / cacheline_size, 1);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm = 1;
srwm &= 0x3f;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *obj;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
};
@@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->obj);
- drm_gem_object_unreference(work->obj);
+ i915_gem_object_unpin(work->old_fb_obj);
+ drm_gem_object_unreference(work->pending_flip_obj);
+ drm_gem_object_unreference(work->old_fb_obj);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
+ if (work && !work->pending) {
+ obj_priv = work->pending_flip_obj->driver_private;
+ DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+ obj_priv,
+ atomic_read(&obj_priv->pending_flip));
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = work->obj->driver_private;
- if (atomic_dec_and_test(&obj_priv->pending_flip))
+ obj_priv = work->pending_flip_obj->driver_private;
+
+ /* Initial scanout buffer will have a 0 pending flip count */
+ if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+ atomic_dec_and_test(&obj_priv->pending_flip))
DRM_WAKEUP(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
}
@@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work)
+ if (intel_crtc->unpin_work) {
intel_crtc->unpin_work->pending = 1;
+ } else {
+ DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
- int ret;
+ int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int ret, pipesrc;
RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
- work->obj = intel_fb->obj;
+ work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
mutex_unlock(&dev->struct_mutex);
@@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+ obj->driver_private);
kfree(work);
+ intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
- /* Reference the old fb object for the scheduled work. */
- drm_gem_object_reference(work->obj);
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(work->old_fb_obj);
+ drm_gem_object_reference(obj);
crtc->fb = fb;
i915_gem_object_flush_write_domain(obj);
drm_vblank_get(dev, intel_crtc->pipe);
obj_priv = obj->driver_private;
atomic_inc(&obj_priv->pending_flip);
+ work->pending_flip_obj = obj;
BEGIN_LP_RING(4);
OUT_RING(MI_DISPLAY_FLIP |
@@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch);
if (IS_I965G(dev)) {
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
- OUT_RING((fb->width << 16) | fb->height);
+ pipesrc = I915_READ(pipesrc_reg);
+ OUT_RING(pipesrc & 0x0fff0fff);
} else {
OUT_RING(obj_priv->gtt_offset);
OUT_RING(MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+ ret = i915_gem_object_pin(fbo, 64*1024);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..b1d0acbae4e4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
{
.ident = "Samsung SX20S",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
},
},
@@ -623,6 +623,13 @@ static const struct dmi_system_id bad_lid_status[] = {
},
},
{
+ .ident = "Aspire 1810T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+ },
+ },
+ {
.ident = "PC-81005",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
@@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
{
enum drm_connector_status status = connector_status_connected;
- if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
status = connector_status_disconnected;
return status;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
+ } else if (flags & SDVO_OUTPUT_CVBS0) {
+
+ sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_priv->is_tv = true;
+ intel_output->needs_tv_clock = true;
+ intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
{
int result;
- if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+ if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
&result))
return -ENODEV;
NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
- if (result & 0x1) { /* Stamina mode - disable the external GPU */
+ if (result) { /* Ensure that the external GPU is enabled */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+ NULL);
+ } else { /* Stamina mode - disable the external GPU */
nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
NULL);
nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
NULL);
- } else { /* Ensure that the external GPU is enabled */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
- NULL);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index ba143972769f..2cd0fad17dac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
struct drm_device *dev = bios->dev;
/* C51 has misaligned regs on purpose. Marvellous */
- if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
- NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
- reg);
- return 0;
- }
- /*
- * Warn on C51 regs that have not been verified accessible in
- * mmiotracing
- */
+ if (reg & 0x2 ||
+ (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+ NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
+
+ /* warn on C51 regs that haven't been verified accessible in tracing */
if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
reg);
- /* Trust the init scripts on G80 */
- if (dev_priv->card_type >= NV_50)
- return 1;
-
- #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
- if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
- return 1;
- if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
- return 1;
- if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
- (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
- WITHIN(reg, 0xc000, 0x48))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
- if (reg == 0x00011014 || reg == 0x00020328)
- return 1;
- if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
- return 1;
+ if (reg >= (8*1024*1024)) {
+ NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
+ return 0;
}
- if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
- return 1;
- if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
- return 1;
- if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
- return 1;
- if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
- WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
- return 1;
- #undef WITHIN
-
- NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
- return 0;
+ return 1;
}
static bool
@@ -1906,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
- if (dev_priv->card_type >= NV_50)
+ if (dev_priv->card_type >= NV_40)
return 1;
/*
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
}
#ifdef __powerpc__
/* Powerbook specific quirks */
- if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
- if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
- if (script == LVDS_PANEL_ON) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
- }
- if (script == LVDS_PANEL_OFF) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+ if ((dev->pci_device & 0xffff) == 0x0179 ||
+ (dev->pci_device & 0xffff) == 0x0189 ||
+ (dev->pci_device & 0xffff) == 0x0329) {
+ if (script == LVDS_RESET) {
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+
+ } else if (script == LVDS_PANEL_ON) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ | (1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+
+ } else if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ & ~(1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
}
}
#endif
@@ -3797,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct init_exec iexec = {true, false};
struct nvbios *bios = &dev_priv->VBIOS;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
@@ -3877,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
}
- bios->display.output = dcbent;
-
if (pxclk == 0) {
script = ROM16(otable[6]);
if (!script) {
@@ -3887,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
script = ROM16(otable[8]);
@@ -3897,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
if (table[4] >= 12)
@@ -3910,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3922,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3934,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
}
return 0;
@@ -5434,52 +5399,49 @@ static bool
parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
- if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
- conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
- conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
- conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
- conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
- conn != 0xf2205004 && conn != 0xf2209004) {
- NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
-
- /* cause output setting to fail for !TV, so message is seen */
- if ((conn & 0xf) != 0x1)
- dcb->entries = 0;
-
- return false;
- }
- /* most of the below is a "best guess" atm */
- entry->type = conn & 0xf;
- if (entry->type == 2)
- /* another way of specifying straps based lvds... */
+ switch (conn & 0x0000000f) {
+ case 0:
+ entry->type = OUTPUT_ANALOG;
+ break;
+ case 1:
+ entry->type = OUTPUT_TV;
+ break;
+ case 2:
+ case 3:
entry->type = OUTPUT_LVDS;
- if (entry->type == 4) { /* digital */
- if (conn & 0x10)
- entry->type = OUTPUT_LVDS;
- else
+ break;
+ case 4:
+ switch ((conn & 0x000000f0) >> 4) {
+ case 0:
entry->type = OUTPUT_TMDS;
+ break;
+ case 1:
+ entry->type = OUTPUT_LVDS;
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
+ (conn & 0x000000f0) >> 4);
+ return false;
+ }
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
+ return false;
}
- /* what's in bits 5-13? could be some encoder maker thing, in tv case */
- entry->i2c_index = (conn >> 14) & 0xf;
- /* raw heads field is in range 0-1, so move to 1-2 */
- entry->heads = ((conn >> 18) & 0x7) + 1;
- entry->location = (conn >> 21) & 0xf;
- /* unused: entry->bus = (conn >> 25) & 0x7; */
- /* set or to be same as heads -- hopefully safe enough */
- entry->or = entry->heads;
+
+ entry->i2c_index = (conn & 0x0003c000) >> 14;
+ entry->heads = ((conn & 0x001c0000) >> 18) + 1;
+ entry->or = entry->heads; /* same as heads, hopefully safe enough */
+ entry->location = (conn & 0x01e00000) >> 21;
+ entry->bus = (conn & 0x0e000000) >> 25;
entry->duallink_possible = false;
switch (entry->type) {
case OUTPUT_ANALOG:
entry->crtconf.maxfreq = (conf & 0xffff) * 10;
break;
- case OUTPUT_LVDS:
- /*
- * This is probably buried in conn's unknown bits.
- * This will upset EDID-ful models, if they exist
- */
- entry->lvdsconf.use_straps_for_mode = true;
- entry->lvdsconf.use_power_scripts = true;
+ case OUTPUT_TV:
+ entry->tvconf.has_component_output = false;
break;
case OUTPUT_TMDS:
/*
@@ -5488,8 +5450,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
*/
fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
break;
- case OUTPUT_TV:
- entry->tvconf.has_component_output = false;
+ case OUTPUT_LVDS:
+		if ((conn & 0x00003f00) >> 8 != 0x10)
+ entry->lvdsconf.use_straps_for_mode = true;
+ entry->lvdsconf.use_power_scripts = true;
+ break;
+ default:
break;
}
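
The rewritten parse_dcb15_entry() no longer whitelists known conn dwords; every field is decoded from a fixed bit range instead. The layout is easy to sanity-check in isolation -- a minimal standalone sketch (the sample dword is one of the values the old whitelist accepted; otherwise purely illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t conn = 0xf2204301;	/* a DCB 1.5 connection dword */

	unsigned type      =  conn & 0x0000000f;
	unsigned subtype   = (conn & 0x000000f0) >> 4;	/* only used for type 4 */
	unsigned i2c_index = (conn & 0x0003c000) >> 14;
	unsigned heads     = ((conn & 0x001c0000) >> 18) + 1;
	unsigned location  = (conn & 0x01e00000) >> 21;
	unsigned bus       = (conn & 0x0e000000) >> 25;

	printf("type %u sub %u i2c %u heads %u location %u bus %u\n",
	       type, subtype, i2c_index, heads, location, bus);
	return 0;
}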
@@ -5564,11 +5530,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
dcb->entries = newentries;
}
-static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct bios_parsed_dcb *bdcb = &bios->bdcb;
struct parsed_dcb *dcb;
- uint16_t dcbptr, i2ctabptr = 0;
+ uint16_t dcbptr = 0, i2ctabptr = 0;
uint8_t *dcbtable;
uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
bool configblock = true;
@@ -5579,16 +5547,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
dcb->entries = 0;
/* get the offset from 0x36 */
- dcbptr = ROM16(bios->data[0x36]);
+ if (dev_priv->card_type > NV_04) {
+ dcbptr = ROM16(bios->data[0x36]);
+ if (dcbptr == 0x0000)
+ NV_WARN(dev, "No output data (DCB) found in BIOS\n");
+ }
+ /* this situation likely means a really old card, pre DCB */
if (dcbptr == 0x0) {
- NV_WARN(dev, "No output data (DCB) found in BIOS, "
- "assuming a CRT output exists\n");
- /* this situation likely means a really old card, pre DCB */
+ NV_INFO(dev, "Assuming a CRT output exists\n");
fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
- if (nv04_tv_identify(dev,
- bios->legacy.i2c_indices.tv) >= 0)
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_tv_output(dcb, twoHeads);
return 0;
@@ -5891,10 +5861,13 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->VBIOS;
struct init_exec iexec = { true, false };
+ unsigned long flags;
+ spin_lock_irqsave(&bios->lock, flags);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
+ spin_unlock_irqrestore(&bios->lock, flags);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -5903,6 +5876,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->VBIOS;
memset(bios, 0, sizeof(struct nvbios));
+ spin_lock_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..68446fd4146b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
struct drm_device *dev;
struct nouveau_bios_info pub;
+ spinlock_t lock;
+
uint8_t data[NV_PROM_SIZE];
unsigned int length;
bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e342a418d434..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
/*
* Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Overallocate memory to
- * avoid corruption of other buffer objects.
+	 * align to that as well as the page size. Align the size to the
+	 * appropriate boundaries. This does imply that sizes are rounded up
+	 * by 3-7 pages, so be aware of this and do not waste memory by
+	 * allocating many small buffers.
*/
if (dev_priv->card_type == NV_50) {
uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
case 0x2800:
case 0x4800:
case 0x7a00:
- *size = roundup(*size, block_size);
if (is_power_of_2(block_size)) {
- *size += 3 * block_size;
for (i = 1; i < 10; i++) {
*align = 12 * i * block_size;
if (!(*align % 65536))
break;
}
} else {
- *size += 6 * block_size;
for (i = 1; i < 10; i++) {
*align = 8 * i * block_size;
if (!(*align % 65536))
break;
}
}
+ *size = roundup(*size, *align);
break;
default:
break;
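
The reordering in nouveau_bo_fixup_align() matters: the alignment is computed first (the smallest multiple of 12*block_size, or 8*block_size for non-power-of-two block sizes, that is also a multiple of 64KiB), and only then is the size rounded up to it, replacing the old roundup-plus-slack overallocation. A userspace sketch of that search, under the same assumptions:

#include <stdio.h>
#include <stdint.h>

static int is_power_of_2(uint32_t n) { return n && !(n & (n - 1)); }
static uint32_t roundup_to(uint32_t x, uint32_t m) { return ((x + m - 1) / m) * m; }

int main(void)
{
	uint32_t block_size = 0x4000;	/* e.g. 512MiB of VRAM >> 15 */
	uint32_t size = 300 * 1024, align = 0;
	int i;

	if (is_power_of_2(block_size)) {
		for (i = 1; i < 10; i++) {
			align = 12 * i * block_size;
			if (!(align % 65536))
				break;
		}
	} else {
		for (i = 1; i < 10; i++) {
			align = 8 * i * block_size;
			if (!(align % 65536))
				break;
		}
	}
	size = roundup_to(size, align);
	printf("align 0x%x, size 0x%x\n", align, size);
	return 0;
}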
@@ -469,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
evict, no_wait, new_mem);
+ if (nvbo->channel && nvbo->channel != chan)
+ ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
/* Ensure the channel is no longer active on the GPU */
pfifo->reassign(dev, false);
- if (pgraph->channel(dev) == chan) {
- pgraph->fifo_access(dev, false);
+ pgraph->fifo_access(dev, false);
+ if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
- pgraph->fifo_access(dev, true);
- }
pgraph->destroy_context(chan);
+ pgraph->fifo_access(dev, true);
if (pfifo->channel_id(dev) == chan->id) {
pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5a10deb8bdbd..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
*
*/
+#include <acpi/button.h>
+
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"
+
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
@@ -83,14 +86,17 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
static void
nouveau_connector_destroy(struct drm_connector *drm_connector)
{
- struct nouveau_connector *connector = nouveau_connector(drm_connector);
- struct drm_device *dev = connector->base.dev;
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(drm_connector);
+ struct drm_device *dev;
- NV_DEBUG_KMS(dev, "\n");
-
- if (!connector)
+ if (!nv_connector)
return;
+ dev = nv_connector->base.dev;
+ NV_DEBUG_KMS(dev, "\n");
+
+ kfree(nv_connector->edid);
drm_sysfs_connector_remove(drm_connector);
drm_connector_cleanup(drm_connector);
kfree(drm_connector);
@@ -233,10 +239,21 @@ nouveau_connector_detect(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
if (nv_encoder && nv_connector->native_mode) {
+#ifdef CONFIG_ACPI
+ if (!nouveau_ignorelid && !acpi_lid_open())
+ return connector_status_disconnected;
+#endif
nouveau_connector_set_encoder(connector, nv_encoder);
return connector_status_connected;
}
+ /* Cleanup the previous EDID block. */
+ if (nv_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ kfree(nv_connector->edid);
+ nv_connector->edid = NULL;
+ }
+
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +264,7 @@ nouveau_connector_detect(struct drm_connector *connector)
if (!nv_connector->edid) {
NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
drm_get_connector_name(connector));
- return connector_status_disconnected;
+ goto detect_analog;
}
if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +298,7 @@ nouveau_connector_detect(struct drm_connector *connector)
return connector_status_connected;
}
+detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -687,8 +705,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
*/
if (!nv_connector->edid && !nv_connector->native_mode &&
!dev_priv->VBIOS.pub.fp_no_ddc) {
- nv_connector->edid =
+ struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
+ if (edid) {
+ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ *(nv_connector->edid) = *edid;
+ }
}
if (!nv_connector->edid)
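
One detail worth noting in the LVDS path above: the connector now takes its own kmalloc'd copy of the BIOS-embedded EDID rather than aliasing ROM data, which is what makes the unconditional kfree(nv_connector->edid) in nouveau_connector_destroy() safe; the struct assignment copies exactly EDID_LENGTH (128) bytes. The copy-before-own pattern, reduced to plain C (the struct layout here is a stand-in):

#include <stdio.h>
#include <stdlib.h>

#define EDID_LENGTH 128

struct edid { unsigned char raw[EDID_LENGTH]; };

int main(void)
{
	static const struct edid rom_edid = { .raw = { 0x00, 0xff, 0xff } };
	struct edid *copy = malloc(sizeof(*copy));

	if (!copy)
		return 1;
	*copy = rom_edid;	/* struct assignment == 128-byte copy */

	printf("copy[1] = 0x%02x\n", copy->raw[1]);
	free(copy);		/* safe: we own it, unlike the ROM image */
	return 0;
}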
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 7afbe8b40d51..50d9e67745af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -126,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
chan->dma.cur += nr_dwords;
}
-static inline bool
-READ_GET(struct nouveau_channel *chan, uint32_t *get)
+/* Fetch and adjust GPU GET pointer
+ *
+ * Returns:
+ * value >= 0, the adjusted GET pointer
+ * -EINVAL if GET pointer currently outside main push buffer
+ * -EBUSY if timeout exceeded
+ */
+static inline int
+READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
uint32_t val;
val = nvchan_rd32(chan, chan->user_get);
- if (val < chan->pushbuf_base ||
- val > chan->pushbuf_base + (chan->dma.max << 2)) {
- /* meaningless to dma_wait() except to know whether the
- * GPU has stalled or not
- */
- *get = val;
- return false;
+
+ /* reset counter as long as GET is still advancing, this is
+ * to avoid misdetecting a GPU lockup if the GPU happens to
+ * just be processing an operation that takes a long time
+ */
+ if (val != *prev_get) {
+ *prev_get = val;
+ *timeout = 0;
+ }
+
+ if ((++*timeout & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (*timeout > 100000)
+ return -EBUSY;
}
- *get = (val - chan->pushbuf_base) >> 2;
- return true;
+ if (val < chan->pushbuf_base ||
+ val > chan->pushbuf_base + (chan->dma.max << 2))
+ return -EINVAL;
+
+ return (val - chan->pushbuf_base) >> 2;
}
int
nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
- uint32_t get, prev_get = 0, cnt = 0;
- bool get_valid;
+ uint32_t prev_get = 0, cnt = 0;
+ int get;
while (chan->dma.free < size) {
- /* reset counter as long as GET is still advancing, this is
- * to avoid misdetecting a GPU lockup if the GPU happens to
- * just be processing an operation that takes a long time
- */
- get_valid = READ_GET(chan, &get);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- DRM_UDELAY(1);
- if (cnt > 100000)
- return -EBUSY;
- }
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get == -EBUSY))
+ return -EBUSY;
/* loop until we have a usable GET pointer. the value
* we read from the GPU may be outside the main ring if
@@ -177,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
* from the SKIPS area, so the code below doesn't have to deal
* with some fun corner cases.
*/
- if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+ if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
continue;
if (get <= chan->dma.cur) {
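
The restructured READ_GET() folds stall detection into the read itself: the retry counter accumulates only while the GET pointer stands still, so a long-running but progressing operation never trips the -EBUSY path. The heuristic in isolation, with the hardware read stubbed out and the per-0xff udelay and out-of-pushbuf (-EINVAL) cases omitted for brevity:

#include <stdio.h>
#include <stdint.h>

/* stand-in for the hardware GET register; stalls after a while */
static uint32_t fake_get(void)
{
	static uint32_t val = 0x1000;
	if (val < 0x1100)
		val += 4;	/* still advancing */
	return val;
}

static int read_get(uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val = fake_get();

	if (val != *prev_get) {		/* progress: reset the counter */
		*prev_get = val;
		*timeout = 0;
	}
	if (++*timeout > 100000)	/* no progress for too long */
		return -1;		/* -EBUSY in the driver */
	return (int)val;
}

int main(void)
{
	uint32_t prev_get = 0, timeout = 0;
	int get;

	while ((get = read_get(&prev_get, &timeout)) >= 0)
		;	/* spin, exactly like nouveau_dma_wait() */
	printf("stalled at GET 0x%x once progress stopped\n", prev_get);
	return 0;
}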
@@ -203,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
* after processing the currently pending commands.
*/
OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+
+ /* wait for GET to depart from the skips area.
+			 * prevents writing GET==PUT, a race that would
+			 * make us think the GPU is idle when it's not.
+ */
+ do {
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get == -EBUSY))
+ return -EBUSY;
+ if (unlikely(get == -EINVAL))
+ continue;
+ } while (get <= NOUVEAU_DMA_SKIPS);
WRITE_PUT(NOUVEAU_DMA_SKIPS);
/* we're now submitting commands at the start of
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 9e2926c48579..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
nv_rd32(dev, NV50_AUXCH_CTRL(index)));
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
udelay(400);
@@ -502,6 +503,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
}
if (cmd & 1) {
+ if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
+
for (i = 0; i < 4; i++) {
data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 06eb993e0883..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -71,6 +71,18 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
int nouveau_uscript_tmds = -1;
module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
+MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
+int nouveau_ignorelid = 0;
+module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+MODULE_PARM_DESC(noaccel, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 026419fe8791..5445cefdd03e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -509,6 +509,8 @@ struct drm_nouveau_private {
void __iomem *ramin;
uint32_t ramin_size;
+ struct nouveau_bo *vga_ram;
+
struct workqueue_struct *wq;
struct work_struct irq_work;
@@ -675,6 +677,9 @@ extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
extern int nouveau_ctxfw;
+extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;
/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0b05c869e0e7..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
+static struct fb_ops nv04_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv04_fbcon_fillrect,
+ .fb_copyarea = nv04_fbcon_copyarea,
+ .fb_imageblit = nv04_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static struct fb_ops nv50_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv50_fbcon_fillrect,
+ .fb_copyarea = nv50_fbcon_copyarea,
+ .fb_imageblit = nv50_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
dev_priv->fbdev_info = info;
strcpy(info->fix.id, "nouveaufb");
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+ if (nouveau_nofbaccel)
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+ else
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
par->nouveau_fb = nouveau_fb;
par->dev = dev;
- if (dev_priv->channel) {
+ if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
nv50_fbcon_accel_init(info);
+ info->fbops = &nv50_fbcon_ops;
break;
default:
nv04_fbcon_accel_init(info);
+ info->fbops = &nv04_fbcon_ops;
break;
};
}
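
The fbcon rework above stops patching function pointers into a shared fb_ops at accel-init time; instead there are complete static tables, and the right one (software, nv04 or nv50 accelerated) is installed wholesale once accel init succeeds, which also makes the nofbaccel path a one-line choice. The vtable-selection idea in miniature (names hypothetical):

#include <stdio.h>

struct fb_ops_demo {
	void (*fillrect)(const char *who);
};

static void sw_fillrect(const char *who)   { printf("%s: software fill\n", who); }
static void nv04_fillrect(const char *who) { printf("%s: nv04 accel\n", who); }
static void nv50_fillrect(const char *who) { printf("%s: nv50 accel\n", who); }

static const struct fb_ops_demo sw_ops   = { .fillrect = sw_fillrect };
static const struct fb_ops_demo nv04_ops = { .fillrect = nv04_fillrect };
static const struct fb_ops_demo nv50_ops = { .fillrect = nv50_fillrect };

int main(void)
{
	int nofbaccel = 0, card_is_nv50 = 1;
	const struct fb_ops_demo *ops = &sw_ops;

	if (!nofbaccel)		/* pick a whole table, never patch one */
		ops = card_is_nv50 ? &nv50_ops : &nv04_ops;

	ops->fillrect("console");
	return 0;
}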
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 462e0b87b4bd..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
void nouveau_fbcon_zfill(struct drm_device *dev);
+void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
+void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2009db2426c3..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -321,6 +321,7 @@ retry:
else {
NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
b->valid_domains);
+ list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
return -EINVAL;
}
@@ -466,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
struct drm_nouveau_gem_pushbuf_bo *bo,
- int nr_relocs, uint64_t ptr_relocs,
- int nr_dwords, int first_dword,
+ unsigned nr_relocs, uint64_t ptr_relocs,
+ unsigned nr_dwords, unsigned first_dword,
uint32_t *pushbuf, bool is_iomem)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
struct drm_device *dev = chan->dev;
- int ret = 0, i;
+ int ret = 0;
+ unsigned i;
reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
@@ -667,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
}
pbbo = nouveau_gem_object(gem);
+ if ((req->offset & 3) || req->nr_dwords < 2 ||
+ (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
+ (unsigned long)req->nr_dwords >
+	    (unsigned long)req->nr_dwords >
+	    ((unsigned long)(pbbo->bo.mem.size - req->offset) >> 2)) {
+ NV_ERROR(dev, "pb call misaligned or out of bounds: "
+ "%d + %d * 4 > %ld\n",
+ req->offset, req->nr_dwords, pbbo->bo.mem.size);
+ ret = -EINVAL;
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
chan->fence.sequence);
if (ret) {
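
The bounds check added above rejects a pushbuf call that is misaligned, shorter than two dwords, or whose dword count would run past the end of the object; subtracting the offset before shifting keeps the comparison overflow-safe. The predicate in isolation (illustrative values):

#include <stdio.h>

static int pb_call_ok(unsigned long bo_size, unsigned offset, unsigned nr_dwords)
{
	if ((offset & 3) || nr_dwords < 2)
		return 0;			/* misaligned / too short */
	if ((unsigned long)offset > bo_size)
		return 0;			/* starts past the end */
	if ((unsigned long)nr_dwords > ((bo_size - offset) >> 2))
		return 0;			/* would run off the end */
	return 1;
}

int main(void)
{
	printf("%d\n", pb_call_ok(4096, 0, 1024));	/* exactly fits: 1 */
	printf("%d\n", pb_call_ok(4096, 4, 1024));	/* one dword over: 0 */
	printf("%d\n", pb_call_ok(4096, 2, 8));		/* misaligned: 0 */
	return 0;
}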
@@ -911,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
}
if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+ spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+ spin_unlock(&nvbo->bo.lock);
} else {
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
if (ret == 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
}
pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
+ if (!pgraph->ctxvals) {
+ NV_ERROR(dev, "OOM copying ctxvals\n");
release_firmware(fw);
nouveau_grctx_fini(dev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 919a619ca7fa..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
get + 4);
}
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
if (status) {
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
@@ -483,6 +497,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
if (nouveau_pgraph_intr_swmthd(dev, &trap))
unhandled = 1;
+ } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ uint32_t v = nv_rd32(dev, 0x402000);
+ nv_wr32(dev, 0x402000, v);
+
+ /* dump the error anyway for now: it's useful for
+		 * Gallium development */
+ unhandled = 1;
} else {
unhandled = 1;
}
@@ -559,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
- uint32_t status, nsource;
+ uint32_t status;
- status = nv_rd32(dev, NV03_PGRAPH_INTR);
- nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- if (status & 0x00000001) {
- nouveau_pgraph_intr_notify(dev, nsource);
- status &= ~0x00000001;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
- }
+ if (status & 0x00000001) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+ status &= ~0x00000001;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+ }
- if (status & 0x00000010) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+ if (status & 0x00000010) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
- status &= ~0x00000010;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
- }
+ status &= ~0x00000010;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+ }
- if (status & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, 0x400500, 0x00010001);
+ if (status & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, NV03_PGRAPH_INTR,
+ NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) &
+ ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, 0x400500, 0x00010001);
- nv50_graph_context_switch(dev);
+ nv50_graph_context_switch(dev);
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- }
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ }
- if (status & 0x00100000) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_DATA_ERROR);
+ if (status & 0x00100000) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_DATA_ERROR);
- status &= ~0x00100000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
- }
+ status &= ~0x00100000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+ }
- if (status & 0x00200000) {
- int r;
-
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
- NV_ERROR(dev, "magic set 1:\n");
- for (r = 0x408900; r <= 0x408910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
- for (r = 0x408e08; r <= 0x408e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
-
- NV_ERROR(dev, "magic set 2:\n");
- for (r = 0x409900; r <= 0x409910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
- for (r = 0x409e08; r <= 0x409e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
-
- status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
- }
+ if (status & 0x00200000) {
+ int r;
+
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+ NV_ERROR(dev, "magic set 1:\n");
+ for (r = 0x408900; r <= 0x408910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408900,
+ nv_rd32(dev, 0x408904) | 0xc0000000);
+ for (r = 0x408e08; r <= 0x408e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408e08,
+ nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+ NV_ERROR(dev, "magic set 2:\n");
+ for (r = 0x409900; r <= 0x409910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409900,
+ nv_rd32(dev, 0x409904) | 0xc0000000);
+ for (r = 0x409e08; r <= 0x409e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409e08,
+ nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+ status &= ~0x00200000;
+ nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+ }
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
+ status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
- {
- const int isb = (1 << 16) | (1 << 0);
+ {
+ const int isb = (1 << 16) | (1 << 0);
- if ((nv_rd32(dev, 0x400500) & isb) != isb)
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ if ((nv_rd32(dev, 0x400500) & isb) != isb)
+ nv_wr32(dev, 0x400500,
+ nv_rd32(dev, 0x400500) | isb);
+ }
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fb9bdd6edf1f..8f3a12f614ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -383,9 +383,8 @@ void nouveau_mem_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+ nouveau_bo_unpin(dev_priv->vga_ram);
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -622,6 +621,15 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, true, true, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
+
/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
if (drm_device_is_agp(dev) && dev->agp) {
@@ -653,6 +661,7 @@ nouveau_mem_init(struct drm_device *dev)
dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
drm_get_resource_len(dev, 1),
DRM_MTRR_WC);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
+ uint32_t flags;
int ret;
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
- TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+ if (nouveau_vram_notify)
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ flags = TTM_PL_FLAG_TT;
+
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
0, 0x0000, false, true, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(ntfy, flags);
if (ret)
goto out_err;
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
target = NV_DMA_TARGET_PCI;
} else {
target = NV_DMA_TARGET_AGP;
+ if (dev_priv->card_type >= NV_50)
+ offset += dev_priv->vm_gart_base;
}
} else {
NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 6c2cf81716df..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -885,11 +885,12 @@ int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
struct nouveau_gpuobj **gpuobj_ret)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_nouveau_private *dev_priv;
struct nouveau_gpuobj *gpuobj;
if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
return -EINVAL;
+ dev_priv = chan->dev->dev_private;
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 251f1b3b38b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
* the card will hang early on in the X init process.
*/
# define NV_PMC_ENABLE_UNK13 (1<<13)
+#define NV40_PMC_GRAPH_UNITS 0x00001540
#define NV40_PMC_BACKLIGHT 0x000015f0
# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
#define NV40_PMC_1700 0x00001700
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
-
- NV_DEBUG(nvbe->dev, "\n");
+ struct drm_device *dev;
if (nvbe && nvbe->pages) {
+ dev = nvbe->dev;
+ NV_DEBUG(dev, "\n");
+
if (nvbe->bound)
be->func->unbind(be);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 09b9a46dfc0e..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
+ struct drm_device *dev = priv;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset >= 0x40)
+ nv_wr32(dev, 0x88054, state);
+ else
+ nv_wr32(dev, 0x1854, state);
+
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_timer;
- /* PGRAPH */
- ret = engine->graph.init(dev);
- if (ret)
- goto out_fb;
+ if (nouveau_noaccel)
+ engine->graph.accel_blocked = true;
+ else {
+ /* PGRAPH */
+ ret = engine->graph.init(dev);
+ if (ret)
+ goto out_fb;
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- goto out_graph;
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret)
+ goto out_graph;
+ }
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
out_irq:
drm_irq_uninstall(dev);
out_fifo:
- engine->fifo.takedown(dev);
+ if (!nouveau_noaccel)
+ engine->fifo.takedown(dev);
out_graph:
- engine->graph.takedown(dev);
+ if (!nouveau_noaccel)
+ engine->graph.takedown(dev);
out_fb:
engine->fb.takedown(dev);
out_timer:
@@ -518,13 +532,16 @@ static void nouveau_card_takedown(struct drm_device *dev)
dev_priv->channel = NULL;
}
- engine->fifo.takedown(dev);
- engine->graph.takedown(dev);
+ if (!nouveau_noaccel) {
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ }
engine->fb.takedown(dev);
engine->timer.takedown(dev);
engine->mc.takedown(dev);
mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
nouveau_sgdma_takedown(dev);
@@ -816,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
+ case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ /* NV40 and NV50 versions are quite different, but register
+ * address is the same. User is supposed to know the card
+ * family anyway... */
+ if (dev_priv->chipset >= 0x40) {
+ getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+ break;
+ }
+ /* FALLTHRU */
default:
NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index d910873c1368..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
FIRE_RING(chan);
- info->fbops->fb_fillrect = nv04_fbcon_fillrect;
- info->fbops->fb_copyarea = nv04_fbcon_copyarea;
- info->fbops->fb_imageblit = nv04_fbcon_imageblit;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206625a2..a3b9563a6f60 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
* of vram. For now, only reserve a small piece until we know
* more about what each chipset requires.
*/
- switch (dev_priv->chipset & 0xf0) {
+ switch (dev_priv->chipset) {
case 0x40:
case 0x47:
case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 118d3285fd8c..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG_KMS(dev, "\n");
+ struct drm_device *dev;
+ struct nouveau_crtc *nv_crtc;
if (!crtc)
return;
+ dev = crtc->dev;
+ nv_crtc = nouveau_crtc(crtc);
+
+ NV_DEBUG_KMS(dev, "\n");
+
drm_crtc_cleanup(&nv_crtc->base);
nv50_cursor_fini(nv_crtc);
@@ -432,6 +435,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ uint32_t dac = 0, sor = 0;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -439,9 +443,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- if (drm_helper_encoder_in_use(encoder))
+ if (!drm_helper_encoder_in_use(encoder))
continue;
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+ nv_encoder->dcb->type == OUTPUT_TV)
+ dac |= (1 << nv_encoder->or);
+ else
+ sor |= (1 << nv_encoder->or);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+ nv_encoder->dcb->type == OUTPUT_TV) {
+ if (dac & (1 << nv_encoder->or))
+ continue;
+ } else {
+ if (sor & (1 << nv_encoder->or))
+ continue;
+ }
+
nv_encoder->disconnect(nv_encoder);
}
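
nv50_crtc_prepare() now sweeps the encoder list twice: the first pass collects bitmasks of every OR still claimed by an in-use DAC or SOR encoder, the second disconnects only encoders whose OR is unclaimed, so preparing one CRTC can no longer tear down an output resource an active encoder shares. The mask logic by itself (simplified encoder list):

#include <stdio.h>
#include <stdint.h>

struct enc { int or; int in_use; };

int main(void)
{
	struct enc encs[] = {
		{ .or = 0, .in_use = 1 },
		{ .or = 0, .in_use = 0 },	/* shares OR0 with an active one */
		{ .or = 2, .in_use = 0 },
	};
	uint32_t used = 0;
	int i, n = sizeof(encs) / sizeof(encs[0]);

	for (i = 0; i < n; i++)		/* pass 1: collect live ORs */
		if (encs[i].in_use)
			used |= 1u << encs[i].or;

	for (i = 0; i < n; i++)		/* pass 2: disconnect unclaimed ORs only */
		if (!(used & (1u << encs[i].or)))
			printf("disconnect encoder %d (OR%d)\n", i, encs[i].or);

	return 0;
}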
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e4f279ee61cf..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
dev_priv->vm_vram_base);
- info->fbops->fb_fillrect = nv50_fbcon_fillrect;
- info->fbops->fb_copyarea = nv50_fbcon_copyarea;
- info->fbops->fb_imageblit = nv50_fbcon_imageblit;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 39caf167587d..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
return ret;
ramfc = chan->ramfc->gpuobj;
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
0, &chan->cache);
if (ret)
return ret;
@@ -317,17 +317,20 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
NV_DEBUG(dev, "ch%d\n", chan->id);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+ /* This will ensure the channel is seen as disabled. */
+ chan->ramfc = NULL;
nv50_fifo_channel_disable(dev, chan->id, false);
/* Dummy channel, also used on ch 127 */
if (chan->id == 0)
nv50_fifo_channel_disable(dev, 127, false);
+
+ nouveau_gpuobj_ref_del(dev, &ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->cache);
}
int
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ca79f32be44c..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x401804, 0xc0000000);
+ nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
uint32_t inst;
int i;
+ /* Be sure we're not in the middle of a context switch or bad things
+ * will happen, such as unloading the wrong pgraph context.
+ */
+ if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "Ctxprog is still running\n");
+
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return NULL;
@@ -275,19 +281,18 @@ nv50_graph_load_context(struct nouveau_channel *chan)
int
nv50_graph_unload_context(struct drm_device *dev)
{
- uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+ uint32_t inst;
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return 0;
inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
- nv_wr32(dev, 0x400500, fifo & ~1);
+ nouveau_wait_for_idle(dev);
nv_wr32(dev, 0x400784, inst);
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400500, fifo);
nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index e395c16d30f5..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -90,11 +90,25 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_encoder *enc;
uint32_t val;
int or = nv_encoder->or;
NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+ nv_encoder->last_dpms = mode;
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nvenc = nouveau_encoder(enc);
+
+ if (nvenc == nv_encoder ||
+ nvenc->disconnect != nv50_sor_disconnect ||
+ nvenc->dcb->or != nv_encoder->dcb->or)
+ continue;
+
+ if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
+ return;
+ }
+
/* wait for it to be done */
if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
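
The nv50_sor_dpms() change records the requested DPMS level per encoder and returns early when any other encoder wired to the same OR is still on, so blanking one connector cannot switch off an OR that is also driving another. The gating test in isolation (hypothetical structures):

#include <stdio.h>

#define DPMS_ON  0
#define DPMS_OFF 3

struct enc { int or; int last_dpms; };

/* return 1 if it is safe to actually program the OR for 'self' */
static int or_idle(struct enc *list, int n, struct enc *self)
{
	int i;

	for (i = 0; i < n; i++) {
		struct enc *other = &list[i];

		if (other == self || other->or != self->or)
			continue;
		if (other->last_dpms == DPMS_ON)
			return 0;	/* someone else still uses this OR */
	}
	return 1;
}

int main(void)
{
	struct enc encs[] = {
		{ .or = 1, .last_dpms = DPMS_ON },	/* e.g. panel on SOR1 */
		{ .or = 1, .last_dpms = DPMS_ON },	/* second output, same OR */
	};

	encs[0].last_dpms = DPMS_OFF;	/* user blanks the first output */
	printf("program OR? %s\n", or_idle(encs, 2, &encs[0]) ? "yes" : "no");
	return 0;
}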
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default"
+ bool "Enable modesetting on radeon by default - NEW DRIVER"
depends on DRM_RADEON
help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain.
+ Choose this option if you want kernel modesetting enabled by default.
+
+ This is a completely new driver. It's only part of the existing drm
+ for compatibility reasons. It requires an entirely different graphics
+ stack above it and works very differently from the old drm stack.
+	  i.e. don't enable this unless you know what you are doing; it may
+ cause issues or bugs compared to the previous userspace driver stack.
When kernel modesetting is enabled the IOCTL of radeon/drm
driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 388140a7e651..2a3df5599ab4 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#define ATOM_DEBUG
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
- val = le32_to_cpu(ctx->ps[idx]);
+ /* get_unaligned_le32 avoids unaligned accesses from atombios
+ * tables, noticed on a DEC Alpha. */
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -246,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
val = gctx->io_attr;
break;
+ case ATOM_WS_REGPTR:
+ val = gctx->reg_block;
+ break;
default:
val = ctx->ws[idx];
}
@@ -385,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+ uint32_t val = 0xCDCDCDCD;
+
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ val = U32(*ptr);
+ (*ptr) += 4;
+ break;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ val = U16(*ptr);
+ (*ptr) += 2;
+ break;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ val = U8(*ptr);
+ (*ptr)++;
+ break;
+ }
+ return val;
+}
+
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
int *ptr, uint32_t *saved, int print)
{
@@ -482,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
gctx->io_attr = val;
break;
+ case ATOM_WS_REGPTR:
+ gctx->reg_block = val;
+ break;
default:
ctx->ws[idx] = val;
}
@@ -677,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src1: ");
- src1 = atom_get_src(ctx, attr, ptr);
+ src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
SDEBUG(" src2: ");
src2 = atom_get_src(ctx, attr, ptr);
dst &= src1;
@@ -809,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst <<= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst >>= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
@@ -818,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- shift = U8((*ptr)++);
+ shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
SDEBUG(" dst: ");
@@ -834,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- shift = U8((*ptr)++);
+ shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
SDEBUG(" dst: ");
@@ -937,18 +1004,18 @@ static struct {
atom_op_or, ATOM_ARG_FB}, {
atom_op_or, ATOM_ARG_PLL}, {
atom_op_or, ATOM_ARG_MC}, {
- atom_op_shl, ATOM_ARG_REG}, {
- atom_op_shl, ATOM_ARG_PS}, {
- atom_op_shl, ATOM_ARG_WS}, {
- atom_op_shl, ATOM_ARG_FB}, {
- atom_op_shl, ATOM_ARG_PLL}, {
- atom_op_shl, ATOM_ARG_MC}, {
- atom_op_shr, ATOM_ARG_REG}, {
- atom_op_shr, ATOM_ARG_PS}, {
- atom_op_shr, ATOM_ARG_WS}, {
- atom_op_shr, ATOM_ARG_FB}, {
- atom_op_shr, ATOM_ARG_PLL}, {
- atom_op_shr, ATOM_ARG_MC}, {
+ atom_op_shift_left, ATOM_ARG_REG}, {
+ atom_op_shift_left, ATOM_ARG_PS}, {
+ atom_op_shift_left, ATOM_ARG_WS}, {
+ atom_op_shift_left, ATOM_ARG_FB}, {
+ atom_op_shift_left, ATOM_ARG_PLL}, {
+ atom_op_shift_left, ATOM_ARG_MC}, {
+ atom_op_shift_right, ATOM_ARG_REG}, {
+ atom_op_shift_right, ATOM_ARG_PS}, {
+ atom_op_shift_right, ATOM_ARG_WS}, {
+ atom_op_shift_right, ATOM_ARG_FB}, {
+ atom_op_shift_right, ATOM_ARG_PLL}, {
+ atom_op_shift_right, ATOM_ARG_MC}, {
atom_op_mul, ATOM_ARG_REG}, {
atom_op_mul, ATOM_ARG_PS}, {
atom_op_mul, ATOM_ARG_WS}, {
@@ -1058,8 +1125,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
- /* reset reg block */
- ctx->reg_block = 0;
ectx.ctx = ctx;
ectx.ps_shift = ps / 4;
ectx.start = base;
@@ -1096,6 +1161,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
mutex_lock(&ctx->mutex);
+ /* reset reg block */
+ ctx->reg_block = 0;
+ /* reset fb window */
+ ctx->fb_base = 0;
+ /* reset io mode */
+ ctx->io_mode = ATOM_IO_MM;
atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
}
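
atom_get_src_direct() above reads an immediate operand at an explicit width instead of going through the full source decoder; the new shift opcodes use it for their one-byte shift count, while atom_op_shl/shr now fetch the shift via atom_get_src(). The width-switched read on its own, assuming a little-endian host to match the U8/U16/U32 accessors (the enum values are stand-ins for ATOM_SRC_*):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

enum { SRC_DWORD, SRC_WORD0, SRC_BYTE0 };	/* stand-ins for ATOM_SRC_* */

static uint32_t get_src_direct(const uint8_t *bios, int *ptr, int align)
{
	uint32_t val = 0xCDCDCDCD;

	switch (align) {
	case SRC_DWORD:
		memcpy(&val, bios + *ptr, 4);	/* U32(), unaligned-safe */
		*ptr += 4;
		break;
	case SRC_WORD0: {
		uint16_t w;
		memcpy(&w, bios + *ptr, 2);	/* U16() */
		val = w;
		*ptr += 2;
		break;
	}
	case SRC_BYTE0:
		val = bios[(*ptr)++];		/* U8() */
		break;
	}
	return val;
}

int main(void)
{
	const uint8_t table[] = { 0x04, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };
	int ptr = 0;

	printf("byte:  0x%02x\n", get_src_direct(table, &ptr, SRC_BYTE0));
	printf("word:  0x%04x\n", get_src_direct(table, &ptr, SRC_WORD0));
	printf("dword: 0x%08x\n", get_src_direct(table, &ptr, SRC_DWORD));
	return 0;
}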
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 47fd943f6d14..bc73781423a1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
+#define ATOM_WS_REGPTR 0x48
#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf59f00c..af464e351fbd 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- printk("executing set crtc dtd timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- printk("executing set crtc timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
}
}
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+union adjust_pixel_clock {
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+};
+
+static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct radeon_pll *pll)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
- uint8_t frev, crev;
- int index;
- SET_PIXEL_CLOCK_PS_ALLOCATION args;
- PIXEL_CLOCK_PARAMETERS *spc1_ptr;
- PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
- PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
- uint32_t pll_clock = mode->clock;
- uint32_t adjusted_clock;
- uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
- struct radeon_pll *pll;
- int pll_flags = 0;
+ u32 adjusted_clock = mode->clock;
- memset(&args, 0, sizeof(args));
+ /* reset the pll flags */
+ pll->flags = 0;
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
- RADEON_PLL_PREFER_CLOSEST_LOWER);
+ pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
+ RADEON_PLL_PREFER_CLOSEST_LOWER);
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
} else {
- pll_flags |= RADEON_PLL_LEGACY;
+ pll->flags |= RADEON_PLL_LEGACY;
if (mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
- if (!ASIC_IS_AVIVO(rdev)) {
- if (encoder->encoder_type !=
- DRM_MODE_ENCODER_DAC)
- pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (encoder->encoder_type ==
- DRM_MODE_ENCODER_LVDS)
- pll_flags |= RADEON_PLL_USE_REF_DIV;
- }
radeon_encoder = to_radeon_encoder(encoder);
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+ adjusted_clock = mode->clock * 2;
+ } else {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+ pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ }
break;
}
}
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
* special hw requirements.
*/
if (ASIC_IS_DCE3(rdev)) {
- ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
+ union adjust_pixel_clock args;
+ struct radeon_encoder_atom_dig *dig;
+ u8 frev, crev;
+ int index;
- if (!encoder)
- return;
-
- memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
- adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
- adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
- adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ if (!radeon_encoder->enc_priv)
+ return adjusted_clock;
+ dig = radeon_encoder->enc_priv;
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
- atom_execute_table(rdev->mode_info.atom_context,
- index, (uint32_t *)&adjust_pll_args);
- adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
- } else {
- /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
- if (ASIC_IS_AVIVO(rdev) &&
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
- adjusted_clock = mode->clock * 2;
- else
- adjusted_clock = mode->clock;
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev);
+
+ memset(&args, 0, sizeof(args));
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
}
+ return adjusted_clock;
+}
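
The union wrappers introduced here (union adjust_pixel_clock, and union set_pixel_clock below) are the usual way to drive multi-revision ATOM parameter structs: one buffer, one memset, and the frev/crev pair from the table header selects which view gets filled. The dispatch pattern in miniature (struct layouts invented for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct params_v1 { uint16_t pixel_clock; uint8_t transmitter; };
struct params_v2 { uint16_t pixel_clock; uint8_t transmitter; uint8_t config; };

union set_clock_params {
	struct params_v1 v1;
	struct params_v2 v2;
};

int main(void)
{
	uint8_t frev = 1, crev = 2;	/* normally parsed from the table header */
	union set_clock_params args;

	memset(&args, 0, sizeof(args));	/* one clear covers every revision */

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			args.v1.pixel_clock = 13500;
			break;
		case 2:
			args.v2.pixel_clock = 13500;
			args.v2.config = 1;
			break;
		default:
			fprintf(stderr, "Unknown table version %d %d\n", frev, crev);
			return 1;
		}
		break;
	default:
		fprintf(stderr, "Unknown table version %d %d\n", frev, crev);
		return 1;
	}
	printf("v%d.%d args ready, clock %u\n", frev, crev,
	       (unsigned)args.v2.pixel_clock);
	return 0;
}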
+
+union set_pixel_clock {
+ SET_PIXEL_CLOCK_PS_ALLOCATION base;
+ PIXEL_CLOCK_PARAMETERS v1;
+ PIXEL_CLOCK_PARAMETERS_V2 v2;
+ PIXEL_CLOCK_PARAMETERS_V3 v3;
+};
+
+void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+ u32 pll_clock = mode->clock;
+ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ u32 adjusted_clock;
+
+ memset(&args, 0, sizeof(args));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ break;
+ }
+ }
+
+ if (!radeon_encoder)
+ return;
if (radeon_crtc->crtc_id == 0)
pll = &rdev->clock.p1pll;
else
pll = &rdev->clock.p2pll;
+ /* adjust pixel clock as needed */
+ adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll)
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
else
radeon_compute_pll(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
} else
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
case 1:
switch (crev) {
case 1:
- spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
- spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc1_ptr->ucFracFbDiv = frac_fb_div;
- spc1_ptr->ucPostDiv = post_div;
- spc1_ptr->ucPpll =
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.usRefDiv = cpu_to_le16(ref_div);
+ args.v1.usFbDiv = cpu_to_le16(fb_div);
+ args.v1.ucFracFbDiv = frac_fb_div;
+ args.v1.ucPostDiv = post_div;
+ args.v1.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
- spc1_ptr->ucRefDivSrc = 1;
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ args.v1.ucRefDivSrc = 1;
break;
case 2:
- spc2_ptr =
- (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
- spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc2_ptr->ucFracFbDiv = frac_fb_div;
- spc2_ptr->ucPostDiv = post_div;
- spc2_ptr->ucPpll =
+ args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v2.usRefDiv = cpu_to_le16(ref_div);
+ args.v2.usFbDiv = cpu_to_le16(fb_div);
+ args.v2.ucFracFbDiv = frac_fb_div;
+ args.v2.ucPostDiv = post_div;
+ args.v2.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
- spc2_ptr->ucRefDivSrc = 1;
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucRefDivSrc = 1;
break;
case 3:
- if (!encoder)
- return;
- spc3_ptr =
- (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
- spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc3_ptr->ucFracFbDiv = frac_fb_div;
- spc3_ptr->ucPostDiv = post_div;
- spc3_ptr->ucPpll =
+ args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.usRefDiv = cpu_to_le16(ref_div);
+ args.v3.usFbDiv = cpu_to_le16(fb_div);
+ args.v3.ucFracFbDiv = frac_fb_div;
+ args.v3.ucPostDiv = post_div;
+ args.v3.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
- spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
- spc3_ptr->ucEncoderMode =
+ args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ args.v3.ucTransmitterId = radeon_encoder->encoder_id;
+ args.v3.ucEncoderMode =
atombios_get_encoder_mode(encoder);
break;
default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
return;
}
- printk("executing set pll\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_AVIVO(rdev))
+ return avivo_crtc_set_base(crtc, x, y, old_fb);
+ else
+ return radeon_crtc_set_base(crtc, x, y, old_fb);
+}
+
+/* properly set additional regs when using atombios */
+static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ u32 disp_merge_cntl;
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+ break;
+ case 1:
+ disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+ break;
+ }
+}
+
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
else {
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- radeon_crtc_set_base(crtc, x, y, old_fb);
- radeon_legacy_atom_set_surface(crtc);
+ atombios_crtc_set_base(crtc, x, y, old_fb);
+ radeon_legacy_atom_fixup(crtc);
}
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
- atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
atombios_lock_crtc(crtc, 1);
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
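
The rewritten PLL path above is built around the versioned-table idiom that recurs throughout AtomBIOS code: declare a union over every known parameter layout, ask atom_parse_cmd_header() which frame/content revision the BIOS implements, fill the matching member, and hand the union to atom_execute_table(). A minimal sketch of that idiom, assuming a hypothetical table with two crev layouts (the union members and index are illustrative stand-ins, not real AtomBIOS definitions):

/* Sketch of the frev/crev dispatch idiom; the union members are
 * hypothetical stand-ins for real AtomBIOS parameter structs. */
union example_args {
	struct { u16 usPixelClock; } v1;
	struct { u16 usPixelClock; u8 ucTransmitterID; } v2;
};

static void example_exec_versioned(struct radeon_device *rdev, int index,
				   u16 clock_10khz, u8 transmitter)
{
	union example_args args;
	u8 frev, crev;

	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
	memset(&args, 0, sizeof(args));

	switch (crev) {
	case 1:
		args.v1.usPixelClock = cpu_to_le16(clock_10khz);
		break;
	case 2:
		args.v2.usPixelClock = cpu_to_le16(clock_10khz);
		args.v2.ucTransmitterID = transmitter;
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}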
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3eb0ca5b3d73..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
+ int retry_count = 0;
memset(&args, 0, sizeof(args));
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+retry:
memcpy(base, req_bytes, num_bytes);
args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus) {
- DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+ if (args.ucReplyStatus && !args.ucDataOutLen) {
+ if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+ goto retry;
+ DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus);
+ chan->rec.i2c_id, args.ucReplyStatus, retry_count);
return false;
}
@@ -468,7 +472,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
@@ -583,7 +587,7 @@ void dp_link_train(struct drm_encoder *encoder,
u8 train_set[4];
int i;
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
@@ -596,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder,
return;
dig_connector = radeon_connector->con_priv;
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
- enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
- else
- enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
- if (dig_connector->linkb)
- enc_id |= ATOM_DP_CONFIG_LINK_B;
- else
- enc_id |= ATOM_DP_CONFIG_LINK_A;
- } else {
- if (dig_connector->linkb)
- enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
- else
- enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
- }
+ if (dig->dig_encoder)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_LINK_A;
memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
if (dig_connector->dp_clock == 270000)
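
The retry added to radeon_process_aux_ch() reflects DisplayPort semantics: an AUX reply status of 0x20 is a native defer, meaning the sink is busy rather than broken, so the transaction should simply be reissued, with a cap so a wedged sink cannot loop forever. The shape of that policy in isolation (aux_native_xfer() is an assumed helper, not a driver function):

/* Sketch of bounded AUX defer handling; aux_native_xfer() is a
 * hypothetical helper returning the raw reply status byte. */
#define AUX_NATIVE_REPLY_DEFER 0x20
#define AUX_MAX_RETRIES        10

static bool aux_xfer_retry(struct radeon_i2c_chan *chan, u8 *buf, int len)
{
	int tries;

	for (tries = 0; tries < AUX_MAX_RETRIES; tries++) {
		u8 status = aux_native_xfer(chan, buf, len); /* hypothetical */

		if (status != AUX_NATIVE_REPLY_DEFER)
			return status == 0; /* 0 == ACK */
	}
	return false; /* sink kept deferring */
}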
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8760d66e058a..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- /* Who ever call radeon_fence_emit should call ring_lock and ask
- * for enough space (today caller are ib schedule and buffer move) */
+	/* We have to make sure that caches are flushed before
+	 * the CPU can read anything back from VRAM. */
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -1504,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
+ track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track);
@@ -3368,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -3399,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
@@ -3482,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
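
The reshuffled failure path here sets a pattern the r300/r420/r520 hunks below repeat: never call the suspend helper on a device whose acceleration never came up, tear ring consumers (CP, writeback, IB) down first, then the IRQ layer, and only then the GART that everything above it maps through. Condensed into one editorial sketch:

/* Editorial sketch of the common accel-init failure ordering; the
 * function name is illustrative, the callees are the real ones. */
static void rxxx_accel_init_failed(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);          /* ring consumers first */
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);   /* then interrupt plumbing */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev); /* finally address translation */
	rdev->accel_working = false;
}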
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 20942127c46b..ff1e0cd608bf 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case 5:
case 6:
case 7:
+ /* 1D/2D */
track->textures[i].tex_coord_type = 0;
break;
case 1:
- track->textures[i].tex_coord_type = 1;
+ /* CUBE */
+ track->textures[i].tex_coord_type = 2;
break;
case 2:
- track->textures[i].tex_coord_type = 2;
+ /* 3D */
+ track->textures[i].tex_coord_type = 1;
break;
}
break;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
+
tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
+ tmp &= R300_MEM_NUM_CHANNELS_MASK;
+ switch (tmp) {
+ case 0: rdev->mc.vram_width = 64; break;
+ case 1: rdev->mc.vram_width = 128; break;
+ case 2: rdev->mc.vram_width = 256; break;
+ default: rdev->mc.vram_width = 128; break;
}
r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
+ radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
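
The switch above fixes a silent capability loss: R300_MEM_NUM_CHANNELS_MASK covers a multi-bit field, so the old boolean test collapsed every multi-channel board to 128 bits and could never report a 256-bit bus. Table form of the same decode, assuming (as the cases suggest) that the masked field is the low two bits:

/* Sketch: channel-field value -> memory bus width in bits.
 * Assumes a two-bit field, per the switch above; unknown encodings
 * fall back to 128 like the default case. */
static u32 r300_mem_width_bits(u32 mem_cntl)
{
	static const u32 width[4] = { 64, 128, 256, 128 };

	return width[(mem_cntl & R300_MEM_NUM_CHANNELS_MASK) & 3];
}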
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 053404e71a9d..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -50,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
@@ -391,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r420_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b9..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f5ff3490929f..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -624,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
fixed20_12 a;
u32 tmp;
int chansize, numchan;
- int r;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -667,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r)
- return r;
/* gtt_size is set up by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.align_mask = 16 - 1;
}
+void r600_cp_fini(struct radeon_device *rdev)
+{
+ r600_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
+
/*
* GPU scratch registers helpers function.
@@ -1792,23 +1794,24 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RB_INT_STAT);
}
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence)
-{
- /* FIXME: implement */
- return 0;
-}
-
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
- r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ int r;
+
+ mutex_lock(&rdev->r600_blit.mutex);
+ rdev->r600_blit.vb_ib = NULL;
+ r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (rdev->r600_blit.vb_ib)
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ mutex_unlock(&rdev->r600_blit.mutex);
+ return r;
+ }
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
+ mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
@@ -1864,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
-
- if (!rdev->r600_blit.shader_obj) {
- r = r600_blit_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+ /* pin copy shader into vram */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
}
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1948,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio resume failed\n");
+ return r;
+ }
+
return r;
}
@@ -1955,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
{
int r;
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
+ r600_irq_suspend(rdev);
r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ }
return 0;
}
@@ -2026,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
r = r600_mc_init(rdev);
if (r)
return r;
@@ -2051,22 +2069,25 @@ int r600_init(struct radeon_device *rdev)
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
- r600_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
@@ -2078,20 +2099,17 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
- /* Suspend operations */
- r600_suspend(rdev);
-
r600_audio_fini(rdev);
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2197,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
rb_bufsz = drm_order(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
- rdev->ih.align_mask = 4 - 1;
+ rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+ rdev->ih.rptr = 0;
}
-static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
int r;
- rdev->ih.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2234,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
return r;
}
}
- rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
- rdev->ih.rptr = 0;
-
return 0;
}
@@ -2386,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
/* allocate ring */
- ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+ ret = r600_ih_ring_alloc(rdev);
if (ret)
return ret;
@@ -2449,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
return ret;
}
-void r600_irq_fini(struct radeon_device *rdev)
+void r600_irq_suspend(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_irq_suspend(rdev);
r600_ih_ring_fini(rdev);
}
@@ -2467,8 +2487,12 @@ int r600_irq_set(struct radeon_device *rdev)
return -EINVAL;
}
/* don't enable anything if the ih is disabled */
- if (!rdev->ih.enabled)
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ r600_disable_interrupt_state(rdev);
return 0;
+ }
if (ASIC_IS_DCE3(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2638,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
- WARN_ON(1);
- /* XXX deal with overflow */
- DRM_ERROR("IH RB overflow\n");
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
}
- wptr = wptr & WPTR_OFFSET_MASK;
-
- return wptr;
+ return (wptr & rdev->ih.ptr_mask);
}
/* r600 IV Ring
@@ -2683,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
u32 wptr = r600_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
- u32 last_entry = rdev->ih.ring_size - 16;
u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
unsigned long flags;
bool queue_hotplug = false;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
spin_lock_irqsave(&rdev->ih.lock, flags);
@@ -2817,10 +2844,8 @@ restart_ih:
}
/* wptr/rptr are in bytes! */
- if (rptr == last_entry)
- rptr = 0;
- else
- rptr += 16;
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
}
/* make sure wptr hasn't changed while processing */
wptr = r600_get_ih_wptr(rdev);
@@ -2888,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
return 0;
#endif
}
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting on for idle
+ *
+ * Some R6XX/R7XX hardware doesn't seem to take into account an HDP flush
+ * performed through the ring buffer; this leads to rendering corruption,
+ * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
+ * directly perform the HDP flush by writing the register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
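
Much of the IH rework above leans on one invariant: r600_ih_ring_init() rounds the ring size to a power of two via drm_order(), so ptr_mask = ring_size - 1 turns every wraparound into a single AND. That is what lets the processing loop replace the old last_entry compare with rptr += 16; rptr &= ptr_mask; and lets the overflow path restart at (wptr + 16) & ptr_mask. The invariant in isolation:

/* Sketch of the power-of-two ring arithmetic; 16 bytes is the size
 * of one r600 IV ring entry, as in the processing loop above. */
static u32 ih_next_rptr(u32 rptr, u32 ptr_mask)
{
	/* ptr_mask == ring_size - 1 with ring_size a power of two,
	 * so the mask wraps the pointer without a branch. */
	return (rptr + 16) & ptr_mask;
}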
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return rdev->family >= CHIP_R600
+ return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
if (!r600_audio_chipset_supported(rdev))
return;
- WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
-
del_timer(&rdev->audio_timer);
+ WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
}
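
The reordering in r600_audio_fini() is about teardown hygiene: the polling timer must be stopped while the audio block it reads is still enabled, otherwise a callback racing with the WREG32_P() could sample a half-disabled engine. The general shape, annotated (the del_timer_sync() remark is an editorial aside, not what the patch uses):

/* Sketch: quiesce asynchronous users before disabling the hardware
 * they touch. del_timer_sync() would additionally wait out a handler
 * already running on another CPU. */
del_timer(&rdev->audio_timer);                 /* no further callbacks */
WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); /* now safe to gate off */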
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 8787ea89dc6e..af1c3ca8a4cb 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -512,14 +513,16 @@ void r600_blit_fini(struct radeon_device *rdev)
{
int r;
+ if (rdev->r600_blit.shader_obj == NULL)
+ return;
+ /* If we can't reserve the bo, unref should be enough to destroy
+ * it when it becomes idle.
+ */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0)) {
- dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
- goto out_unref;
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-out_unref:
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
@@ -555,7 +558,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
int dwords_per_loop = 76, num_loops;
r = r600_vb_ib_get(rdev);
- WARN_ON(r);
+ if (r)
+ return r;
/* set_render_target emits 2 extra dwords on rv6xx */
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
@@ -581,7 +585,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size += 5; /* done copy */
ring_size += 7; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
- WARN_ON(r);
+ if (r)
+ return r;
set_default_state(rdev); /* 14 */
set_shaders(rdev); /* 26 */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 44060b92d9e6..e4c45ec16507 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+struct r600_cs_track {
+ u32 cb_color0_base_last;
+};
+
/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
}
/**
+ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check whether the next packet is a packet3 NOP, which userspace emits
+ * as the relocation marker; the packet is only peeked at, nothing is consumed.
+ **/
+static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
* r600_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_cs_reloc *reloc;
+ struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
int r;
u32 idx_value;
+ track = (struct r600_cs_track *)p->track;
ib = p->ib->ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
switch (reg) {
+		/* These registers were added late; there is userspace
+		 * which does provide relocations for them but sets a
+		 * 0 offset. In order to avoid breaking old userspace
+		 * we detect this and set the address to point to the last
+		 * CB_COLOR0_BASE. Note that if userspace doesn't set
+		 * CB_COLOR0_BASE before this register we will report an
+		 * error. Old userspace always sets CB_COLOR0_BASE
+		 * before any of these.
+		 */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color0_base_last) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] = track->cb_color0_base_last;
+ printk_once(KERN_WARNING "radeon: You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
case DB_DEPTH_BASE:
case DB_HTILE_DATA_BASE:
case CB_COLOR0_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color0_base_last = ib[idx+1+i];
+ break;
case CB_COLOR1_BASE:
case CB_COLOR2_BASE:
case CB_COLOR3_BASE:
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
int r600_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
+ struct r600_cs_track *track;
int r;
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ p->track = track;
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
+ parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
parser.ib = &fake_ib;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894edadab4..30480881aed1 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+
+#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
+#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280E0_BASE_256B 0x00000000
+#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
+#define R_0280C0_CB_COLOR0_TILE 0x0280C0
+#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280C0_BASE_256B 0x00000000
+#define R_0280C4_CB_COLOR1_TILE 0x0280C4
+#define R_0280C8_CB_COLOR2_TILE 0x0280C8
+#define R_0280CC_CB_COLOR3_TILE 0x0280CC
+#define R_0280D0_CB_COLOR4_TILE 0x0280D0
+#define R_0280D4_CB_COLOR5_TILE 0x0280D4
+#define R_0280D8_CB_COLOR6_TILE 0x0280D8
+#define R_0280DC_CB_COLOR7_TILE 0x0280DC
+
+
#endif
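
The new defines follow the r600d.h S_/G_/C_ naming scheme: for each register field, S_*() shifts a value into position, G_*() extracts it back out, and C_* is the complement mask for clearing the field in a read-modify-write. With BASE_256B occupying all 32 bits the mask degenerates to 0, so the idiom is easier to see on a hypothetical narrower field:

/* Hypothetical 4-bit field at bit 8, purely to illustrate the
 * S_/G_/C_ macro family used above. */
#define S_EXAMPLE_FIELD(x) (((x) & 0xF) << 8)
#define G_EXAMPLE_FIELD(x) (((x) >> 8) & 0xF)
#define C_EXAMPLE_FIELD    0xFFFFF0FF

static inline u32 set_example_field(u32 reg, u32 val)
{
	return (reg & C_EXAMPLE_FIELD) | S_EXAMPLE_FIELD(val);
}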
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index eb5f99b9469d..f57480ba1355 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -410,13 +410,13 @@ struct r600_ih {
unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
- uint32_t align_mask;
uint32_t ptr_mask;
spinlock_t lock;
bool enabled;
};
struct r600_blit {
+ struct mutex mutex;
struct radeon_bo *shader_obj;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
@@ -465,6 +465,7 @@ struct radeon_cs_chunk {
};
struct radeon_cs_parser {
+ struct device *dev;
struct radeon_device *rdev;
struct drm_file *filp;
/* chunks */
@@ -660,6 +661,13 @@ struct radeon_asic {
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	/* ioctl hw specific callback. Some hw might want to perform a special
+	 * operation on a specific ioctl. For instance, on wait idle some hw
+	 * might want to perform an HDP flush through MMIO, as it seems that
+	 * some R6XX/R7XX hw doesn't take an HDP flush into account if it is
+	 * programmed through the ring.
+ */
+ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
};
/*
@@ -847,7 +855,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
- if (reg < 0x10000)
+ if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -857,7 +865,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- if (reg < 0x10000)
+ if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -1017,6 +1025,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
/* Common functions */
+/* AGP */
+extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1140,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
+extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1160,7 +1171,8 @@ extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
-
+extern void r600_irq_suspend(struct radeon_device *rdev);
+/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
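
Unlike most asic callbacks, ioctl_wait_idle gets no radeon_xxx() wrapper macro here, and the tables below explicitly NULL it for pre-R600 parts, so the caller is expected to test the pointer before the indirect call. A sketch of the intended call-site guard (the actual GEM wait-idle ioctl that uses it is outside this diff):

/* Sketch of guarded use; robj stands for the bo the ioctl waited on. */
if (rdev->asic->ioctl_wait_idle)
	rdev->asic->ioctl_wait_idle(rdev, robj);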
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 220f454ea9fa..c0681a5556dc 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_info(rdev->ddev, &info);
if (ret) {
+ drm_agp_release(rdev->ddev);
DRM_ERROR("Unable to get AGP info: %d\n", ret);
return ret;
}
+
+ if (rdev->ddev->agp->agp_info.aper_size < 32) {
+ drm_agp_release(rdev->ddev);
+ dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
+ "need at least 32M, disabling AGP\n",
+ rdev->ddev->agp->agp_info.aper_size);
+ return -EINVAL;
+ }
+
mode.mode = info.mode;
agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_enable(rdev->ddev, mode);
if (ret) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+ drm_agp_release(rdev->ddev);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
/*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa82ca74324e..2dcda6115874 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* XFX Pine Group device rv730 reports no VGA DDC lines
+ * even though they are wired up to record 0x93
+ */
+ if ((dev->pdev->device == 0x9498) &&
+ (dev->pdev->subsystem_vendor == 0x1682) &&
+ (dev->pdev->subsystem_device == 0x2452)) {
+ struct radeon_device *rdev = dev->dev_private;
+ *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ }
return true;
}
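
The new quirk keys on the full (device, subsystem vendor, subsystem device) triple, which pins the workaround to one OEM's build of the rv730 rather than every 0x9498 board. radeon_atom_apply_quirks() open-codes such matches as if-chains; the same data could be expressed table-driven, as a sketch:

/* Editorial sketch: table-driven form of the DDC quirk above. */
struct ddc_quirk {
	u16 device;
	u16 sub_vendor;
	u16 sub_device;
	u8  gpio_record;
};

static const struct ddc_quirk ddc_quirks[] = {
	/* XFX Pine Group rv730: VGA DDC really lives on record 0x93 */
	{ 0x9498, 0x1682, 0x2452, 0x93 },
};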
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- start_jiffies = jiffies;
- for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
- if (r) {
- goto out_cleanup;
+
+	/* r100 doesn't have a dma engine, so skip the test */
+ if (rdev->asic->copy_dma) {
+
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE, fence);
+
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
}
- r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
- if (r) {
- goto out_cleanup;
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+ " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+ n, size >> 10,
+ sdomain, ddomain, time,
+ i, i * 1000, (i * 1000) / 1024);
}
- r = radeon_fence_wait(fence, false);
- if (r) {
- goto out_cleanup;
- }
- radeon_fence_unref(&fence);
- }
- end_jiffies = jiffies;
- time = end_jiffies - start_jiffies;
- time = jiffies_to_msecs(time);
- if (time > 0) {
- i = ((n * size) >> 10) / time;
- printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
- " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
- sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
}
+
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
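
The benchmark's throughput print is all integer math: (n * size) >> 10 converts the total bytes moved to kilobytes, dividing by the jiffies_to_msecs() result gives kb/ms, and the format arguments rescale that to kb/s (* 1000) and M/s (/ 1024). A worked instance, with hypothetical numbers:

/* Worked example of the arithmetic above (values are hypothetical). */
unsigned n = 1024;                      /* moves */
unsigned size = 1024 * 1024;            /* bytes per move */
unsigned long time = 512;               /* elapsed ms */
unsigned i = ((n * size) >> 10) / time; /* 1048576 kb / 512 ms = 2048 */
/* printed as 2048 kb/ms, 2048000 kb/s, 2000 M/s */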
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 812f24dbc2a8..73c4405bf42f 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
else if (post_div == 3)
sclk >>= 2;
else if (post_div == 4)
- sclk >>= 4;
+ sclk >>= 3;
return sclk;
}
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
else if (post_div == 3)
mclk >>= 2;
else if (post_div == 4)
- mclk >>= 4;
+ mclk >>= 3;
return mclk;
}
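
Both hunks fix the same decode error: in this PLL readback, post-divider encoding 4 selects divide-by-8, so the shift must be 3; the old >>= 4 divided by 16 and halved every clock reported through that path. In numbers:

/* Worked example of the post-divider fix (clock value hypothetical). */
u32 sclk = 400;   /* MHz before the post divide, post_div encoding 4 */
sclk >>= 3;       /* 400 / 8 = 50 MHz; the old >>= 4 gave 25 MHz */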
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e081..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
lvds->native_mode.vdisplay);
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
- if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
- lvds->panel_vcc_delay = 2000;
+ lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
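
The dropped < 0 arm was dead code: the delay comes from RBIOS16(), and the min_t(u16, ...) implies the field is unsigned, so only the upper bound ever did anything. min_t also documents the type the comparison happens in, which the old open-coded test left implicit. The idiom on its own, assuming an unsigned field as the patch does:

/* Sketch: clamping an unsigned BIOS-supplied delay; a < 0 test on a
 * u16 can never fire, so only the cap remains. */
u16 vcc_delay = RBIOS16(lcd_info + 0x2c);
vcc_delay = min_t(u16, vcc_delay, 2000);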
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa47..238188540017 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
- bool dret;
+ bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
- bool dret;
+ bool dret = false;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -1343,7 +1347,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->dac_load_detect = false;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
- 1);
+ radeon_connector->dac_load_detect);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0f1d93..1190148cf5e6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -189,7 +189,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (error) {
+ if (error && parser->ib) {
radeon_bo_list_unvalidate(&parser->validated,
parser->ib->fence);
} else {
@@ -231,6 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
parser.rdev = rdev;
+ parser.dev = rdev->dev;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0c51f8e46613..768b1509fa03 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev)
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
void radeon_check_arguments(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0ec491ead2ff..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+ }
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -357,7 +366,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (dig->dp_i2c_bus)
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
}
if (!radeon_connector->ddc_bus)
@@ -410,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags)
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
+ uint32_t min_post_div = pll->min_post_div;
+ uint32_t max_post_div = pll->max_post_div;
uint32_t min_fractional_feed_div = 0;
uint32_t max_fractional_feed_div = 0;
uint32_t best_vco = pll->best_vco;
@@ -430,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
- if (flags & RADEON_PLL_USE_REF_DIV)
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
while (min_ref_div < max_ref_div-1) {
@@ -445,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
}
}
- if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ if (pll->flags & RADEON_PLL_USE_POST_DIV)
+ min_post_div = max_post_div = pll->post_div;
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
min_fractional_feed_div = pll->min_frac_feedback_div;
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
+ for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
uint32_t ref_div;
- if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
continue;
/* legacy radeons only have a few post_divs */
- if (flags & RADEON_PLL_LEGACY) {
+ if (pll->flags & RADEON_PLL_LEGACY) {
if ((post_div == 5) ||
(post_div == 7) ||
(post_div == 9) ||
@@ -504,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
current_freq = radeon_div(tmp, ref_div * post_div);
- if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+ if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
error = freq - current_freq;
error = error < 0 ? 0xffffffff : error;
} else
@@ -531,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
best_freq = current_freq;
best_error = error;
best_vco_diff = vco_diff;
- } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
- ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
- ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+ } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
best_post_div = post_div;
best_ref_div = ref_div;
best_feedback_div = feedback_div;
@@ -572,8 +586,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags)
+ uint32_t *post_div_p)
{
fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
fixed20_12 pll_out_max, pll_out_min;
@@ -667,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
radeonfb_remove(dev, fb);
if (radeon_fb->obj) {
- radeon_gem_object_unpin(radeon_fb->obj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(radeon_fb->obj);
mutex_unlock(&dev->struct_mutex);
@@ -715,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-
+ if (obj == NULL) {
+ dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ "can't create framebuffer\n", mode_cmd->handle);
+ return NULL;
+ }
return radeon_framebuffer_create(dev, mode_cmd, obj);
}
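
Removing the flags parameter from both compute functions only makes sense alongside a struct change this diff doesn't show: the RADEON_PLL_* word moves into struct radeon_pll itself (presumably in radeon_mode.h), which is why the atombios hunks at the top now write pll->flags before computing. Assumed shape of that companion change:

/* Assumed sketch of the companion radeon_mode.h change; the named
 * neighbors are illustrative, only the flags field is the point. */
struct radeon_pll {
	/* ... reference_freq, min/max divider limits, ... */
	u32 flags;         /* RADEON_PLL_* hints, formerly a parameter */
	u32 reference_div; /* honored when RADEON_PLL_USE_REF_DIV is set */
	u32 post_div;      /* honored when RADEON_PLL_USE_POST_DIV is set */
};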
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 82eb551970b9..3c91724457ca 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
return ret;
}
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ default:
+ return false;
+ }
+}
void
radeon_link_encoder_connector(struct drm_device *dev)
{
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->devices & radeon_connector->devices)
+ if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -676,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- num = dig->dig_block + 1;
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- num = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- num = 2;
- break;
- }
- }
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ num = dig->dig_encoder + 1;
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
@@ -822,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
+ if (dig->dig_encoder == 1)
args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1;
@@ -849,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v2.acConfig.fCoherentMode = 1;
}
} else {
+
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+ if (dig->dig_encoder)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
if (dig_connector->igp_lane_info & 0x3)
@@ -878,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
}
break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- break;
}
if (radeon_encoder->pixel_clock > 165000)
@@ -1046,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
union crtc_sourc_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
memset(&args, 0, sizeof(args));
@@ -1109,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (ASIC_IS_DCE32(rdev)) {
- if (radeon_crtc->crtc_id)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- } else {
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return;
- radeon_connector = to_radeon_connector(connector);
- if (!radeon_connector->con_priv)
- return;
- dig_connector = radeon_connector->con_priv;
-
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- }
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ dig = radeon_encoder->enc_priv;
+ if (dig->dig_encoder)
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1202,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
}
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *test_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ uint32_t dig_enc_in_use = 0;
+ /* on DCE32 an encoder can drive any block, so just use the crtc id */
+ if (ASIC_IS_DCE32(rdev)) {
+ return radeon_crtc->crtc_id;
+ }
+
+ /* on DCE3 - LVTMA can only be driven by DIGB */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_test_encoder;
+
+ if (encoder == test_encoder)
+ continue;
+
+ if (!radeon_encoder_is_digital(test_encoder))
+ continue;
+
+ radeon_test_encoder = to_radeon_encoder(test_encoder);
+ dig = radeon_test_encoder->enc_priv;
+
+ if (dig->dig_encoder >= 0)
+ dig_enc_in_use |= (1 << dig->dig_encoder);
+ }
+
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+ if (dig_enc_in_use & 0x2)
+ DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+ return 1;
+ }
+ if (!(dig_enc_in_use & 1))
+ return 0;
+ return 1;
+}
+
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -1214,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
- if (radeon_encoder->enc_priv) {
- struct radeon_encoder_atom_dig *dig;
-
- dig = radeon_encoder->enc_priv;
- dig->dig_block = radeon_crtc->crtc_id;
- }
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig)
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1379,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (radeon_encoder_is_digital(encoder)) {
+ dig = radeon_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
radeon_encoder->active_device = 0;
}
@@ -1436,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
/* coherent mode by default */
dig->coherent_mode = true;
+ dig->dig_encoder = -1;
return dig;
}
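
Taken together, the new allocation policy is small: on DCE32 the blocks are interchangeable so the CRTC id is reused directly, earlier DCE3 parts take the first free block, and LVTMA is pinned to DIGB. A minimal standalone sketch of that policy (helper name and parameters illustrative, not the driver's API):

	/* First-free DIG allocation with the LVTMA exception, as above. */
	static int pick_dig_block(unsigned int in_use, bool is_dce32,
				  int crtc_id, bool is_lvtma)
	{
		if (is_dce32)
			return crtc_id;		/* any block can drive any crtc */
		if (is_lvtma)
			return 1;		/* only DIGB can drive LVTMA */
		return (in_use & 0x1) ? 1 : 0;	/* otherwise first free block */
	}
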
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- memset_io(fbptr, 0xff, aligned_size);
+ memset_io(fbptr, 0x0, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e18534..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, NULL, false);
+ /* call hw-specific functions, if any */
+ if (robj->rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cc27485a07ad..b6d8081e1246 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-/* properly set crtc bpp when using atombios */
-void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int format;
- uint32_t crtc_gen_cntl;
- uint32_t disp_merge_cntl;
- uint32_t crtc_pitch;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- format = 2;
- break;
- case 15: /* 555 */
- format = 3;
- break;
- case 16: /* 565 */
- format = 4;
- break;
- case 24: /* RGB */
- format = 5;
- break;
- case 32: /* xRGB */
- format = 6;
- break;
- default:
- return;
- }
-
- crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
- ((crtc->fb->bits_per_pixel * 8) - 1)) /
- (crtc->fb->bits_per_pixel * 8));
- crtc_pitch |= crtc_pitch << 16;
-
- WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
-
- switch (radeon_crtc->crtc_id) {
- case 0:
- disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
- disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
- WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
-
- crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
- crtc_gen_cntl |= (format << 8);
- crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
- WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
- break;
- case 1:
- disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
- disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
- WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
-
- crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
- crtc_gen_cntl |= (format << 8);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
- WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
- WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
- break;
- }
-}
-
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
uint32_t post_divider = 0;
uint32_t freq = 0;
uint8_t pll_gain;
- int pll_flags = RADEON_PLL_LEGACY;
bool use_bios_divs = false;
/* PLL registers */
uint32_t pll_ref_div = 0;
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
else
pll = &rdev->clock.p1pll;
+ pll->flags = RADEON_PLL_LEGACY;
+
if (mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
- pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
if (!rdev->is_atom_bios) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
}
}
- pll_flags |= RADEON_PLL_USE_REF_DIV;
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
}
}
}
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!use_bios_divs) {
radeon_compute_pll(pll, mode->clock,
&freq, &feedback_div, &frac_fb_div,
- &reference_div, &post_divider,
- pll_flags);
+ &reference_div, &post_divider);
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 91cb041cb40d..e81b2aeb6a8f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -125,16 +125,24 @@ struct radeon_tmds_pll {
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV (1 << 12)
struct radeon_pll {
- uint16_t reference_freq;
- uint16_t reference_div;
+ /* reference frequency */
+ uint32_t reference_freq;
+
+ /* fixed dividers */
+ uint32_t reference_div;
+ uint32_t post_div;
+
+ /* pll in/out limits */
uint32_t pll_in_min;
uint32_t pll_in_max;
uint32_t pll_out_min;
uint32_t pll_out_max;
- uint16_t xclk;
+ uint32_t best_vco;
+ /* divider limits */
uint32_t min_ref_div;
uint32_t max_ref_div;
uint32_t min_post_div;
@@ -143,7 +151,12 @@ struct radeon_pll {
uint32_t max_feedback_div;
uint32_t min_frac_feedback_div;
uint32_t max_frac_feedback_div;
- uint32_t best_vco;
+
+ /* flags for the current clock */
+ uint32_t flags;
+
+ /* pll id */
+ uint32_t id;
};
struct radeon_i2c_chan {
@@ -286,7 +299,7 @@ struct radeon_atom_ss {
struct radeon_encoder_atom_dig {
/* atom dig */
bool coherent_mode;
- int dig_block;
+ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
@@ -417,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags);
+ uint32_t *post_div_p);
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
@@ -426,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags);
+ uint32_t *post_div_p);
extern void radeon_setup_encoder_clones(struct drm_device *dev);
@@ -453,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
-extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
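
With the flags now carried on struct radeon_pll itself, call sites configure pll->flags before computing dividers instead of passing a trailing flags argument. A sketch of the new calling convention (flag choice illustrative):

	uint32_t freq, feedback_div, frac_fb_div, reference_div, post_divider;

	/* New convention: set flags on the pll, then compute. */
	pll->flags = RADEON_PLL_LEGACY | RADEON_PLL_PREFER_LOW_REF_DIV;
	radeon_compute_pll(pll, mode->clock,
			   &freq, &feedback_div, &frac_fb_div,
			   &reference_div, &post_divider);
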
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4e636de877b2..d72a71bff218 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,7 +220,8 @@ int radeon_bo_unpin(struct radeon_bo *bo)
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
- if (rdev->flags & RADEON_IS_IGP) {
+ /* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
+ if (0 && (rdev->flags & RADEON_IS_IGP)) {
if (rdev->mc.igp_sideport_enabled == false)
/* Useless to evict on IGP chips */
return 0;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c8849a16..c29ac434ac9c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
0x2648 RE_POINTSIZE
0x26c0 RE_TOP_LEFT
0x26c4 RE_MISC
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0;
}
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(0x0150);
+ if (tmp & (1 << 2)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
- if (r300_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
+ if (rs400_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+ "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
}
}
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
r100_mc_stop(rdev, &save);
/* Wait for mc idle */
- if (r300_mc_wait_for_idle(rdev))
- dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ if (rs400_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
WREG32(R_000148_MC_FB_LOCATION,
S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
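
rs400_mc_wait_for_idle() above is the usual register-poll-with-timeout shape; generalized as a sketch (helper name illustrative; offset 0x0150 and bit 2 are the rs400 values from the hunk):

	static int poll_bit_set(struct radeon_device *rdev, uint32_t reg, uint32_t bit)
	{
		unsigned i;

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(reg) & bit)
				return 0;	/* ready bit observed */
			DRM_UDELAY(1);
		}
		return -1;			/* caller decides how loudly to warn */
	}
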
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b3..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b044..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init; stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 59c71245fb91..5943d561fd1e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -779,7 +779,6 @@ int rv770_mc_init(struct radeon_device *rdev)
fixed20_12 a;
u32 tmp;
int chansize, numchan;
- int r;
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
@@ -822,9 +821,6 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r)
- return r;
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -891,26 +887,25 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
rv770_gpu_init(rdev);
-
- if (!rdev->r600_blit.shader_obj) {
- r = r600_blit_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+ /* pin copy shader into vram */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
}
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
- return r;
- }
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -972,13 +967,16 @@ int rv770_suspend(struct radeon_device *rdev)
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ r600_irq_suspend(rdev);
r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (likely(r == 0)) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
}
return 0;
}
@@ -1037,6 +1035,11 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
r = rv770_mc_init(rdev);
if (r)
return r;
@@ -1062,22 +1065,25 @@ int rv770_init(struct radeon_device *rdev)
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
- rv770_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
return 0;
@@ -1085,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- rv770_suspend(rdev);
-
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
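
Blitter initialization is now non-fatal: on failure the copy hook is cleared so TTM falls back to memcpy moves. The new pattern in brief (a sketch of the calls from the hunk above):

	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;	/* TTM then uses memcpy for moves */
		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
	}
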
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -426,7 +426,8 @@ moved:
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
spin_unlock(&bo->lock);
- }
+ } else
+ bo->offset = 0;
return 0;
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry, *nentry;
- struct list_head *list, *next;
- int ret;
+ struct ttm_buffer_object *entry = NULL;
+ int ret = 0;
spin_lock(&glob->lru_lock);
- list_for_each_safe(list, next, &bdev->ddestroy) {
- entry = list_entry(list, struct ttm_buffer_object, ddestroy);
- nentry = NULL;
+ if (list_empty(&bdev->ddestroy))
+ goto out_unlock;
- /*
- * Protect the next list entry from destruction while we
- * unlock the lru_lock.
- */
+ entry = list_first_entry(&bdev->ddestroy,
+ struct ttm_buffer_object, ddestroy);
+ kref_get(&entry->list_kref);
- if (next != &bdev->ddestroy) {
- nentry = list_entry(next, struct ttm_buffer_object,
- ddestroy);
+ for (;;) {
+ struct ttm_buffer_object *nentry = NULL;
+
+ if (entry->ddestroy.next != &bdev->ddestroy) {
+ nentry = list_first_entry(&entry->ddestroy,
+ struct ttm_buffer_object, ddestroy);
kref_get(&nentry->list_kref);
}
- kref_get(&entry->list_kref);
spin_unlock(&glob->lru_lock);
ret = ttm_bo_cleanup_refs(entry, remove_all);
kref_put(&entry->list_kref, ttm_bo_release_list);
+ entry = nentry;
+
+ if (ret || !entry)
+ goto out;
spin_lock(&glob->lru_lock);
- if (nentry) {
- bool next_onlist = !list_empty(next);
- spin_unlock(&glob->lru_lock);
- kref_put(&nentry->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- /*
- * Someone might have raced us and removed the
- * next entry from the list. We don't bother restarting
- * list traversal.
- */
-
- if (!next_onlist)
- break;
- }
- if (ret)
+ if (list_empty(&entry->ddestroy))
break;
}
- ret = !list_empty(&bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+out_unlock:
+ spin_unlock(&glob->lru_lock);
+out:
+ if (entry)
+ kref_put(&entry->list_kref, ttm_bo_release_list);
return ret;
}
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
+
+ if (mem_type == TTM_PL_SYSTEM) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ mem->mm_node = NULL;
+ return 0;
+ }
+
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
@@ -1019,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
int i;
+ struct drm_mm_node *node = mem->mm_node;
+
+ if (node && placement->lpfn != 0 &&
+ (node->start < placement->fpfn ||
+ node->start + node->size > placement->lpfn))
+ return -1;
for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
@@ -1844,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* anyone tries to access a ttm page.
*/
+ if (bo->bdev->driver->swap_notify)
+ bo->bdev->driver->swap_notify(bo);
+
ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:
@@ -1864,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
;
}
+EXPORT_SYMBOL(ttm_bo_swapout_all);
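
The rewritten delayed-delete loop is an instance of a general pattern: take a reference on the next element before dropping the lock, so it cannot be freed while the current one is processed unlocked. A sketch with illustrative names (struct obj, process, obj_release); the list is assumed non-empty on entry:

	spin_lock(&lock);
	entry = list_first_entry(&head, struct obj, node);
	kref_get(&entry->ref);
	for (;;) {
		struct obj *next = NULL;

		if (entry->node.next != &head) {
			next = list_first_entry(&entry->node, struct obj, node);
			kref_get(&next->ref);	/* pin before dropping the lock */
		}
		spin_unlock(&lock);
		process(entry);			/* may sleep; list may mutate */
		kref_put(&entry->ref, obj_release);
		entry = next;
		if (!entry)
			break;
		spin_lock(&lock);
	}
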
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f6..5ca37a58a98c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
- save_flags = old_mem->placement;
}
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
void *old_iomap;
void *new_iomap;
int ret;
- uint32_t save_flags = old_mem->placement;
unsigned long i;
unsigned long page;
unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- uint32_t save_flags = old_mem->placement;
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4ec..3d172ef04ee1 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
+EXPORT_SYMBOL(ttm_suspend_unlock);
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
{
wait_event(lock->queue, __ttm_suspend_lock(lock));
}
+EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824b..75e9d6f86ba4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
struct drm_hash_item hash;
struct list_head head;
struct kref kref;
- struct ttm_base_object *obj;
enum ttm_ref_type ref_type;
+ struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba5..e2123af7775a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -198,17 +198,26 @@ EXPORT_SYMBOL(ttm_tt_populate);
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_state)
{
+ int ret = 0;
+
if (PageHighMem(p))
return 0;
- switch (c_state) {
- case tt_cached:
- return set_pages_wb(p, 1);
- case tt_wc:
- return set_memory_wc((unsigned long) page_address(p), 1);
- default:
- return set_pages_uc(p, 1);
+ if (get_page_memtype(p) != -1) {
+ /* p isn't in the default caching state; set it to
+ * writeback first to free its current memtype. */
+
+ ret = set_pages_wb(p, 1);
+ if (ret)
+ return ret;
}
+
+ if (c_state == tt_wc)
+ ret = set_memory_wc((unsigned long) page_address(p), 1);
+ else if (c_state == tt_uncached)
+ ret = set_pages_uc(p, 1);
+
+ return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index d6f2d2b882e9..825ebe3d89d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = {
.busy_placement = &vram_placement_flags
};
+struct ttm_placement vmw_vram_sys_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
struct ttm_placement vmw_vram_ne_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ if (new_mem->mem_type != TTM_PL_SYSTEM)
+ vmw_dmabuf_gmr_unbind(bo);
+}
+
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+ vmw_dmabuf_gmr_unbind(bo);
+}
+
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_wait = vmw_sync_obj_wait,
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
- .sync_obj_ref = vmw_sync_obj_ref
+ .sync_obj_ref = vmw_sync_obj_ref,
+ .move_notify = vmw_move_notify,
+ .swap_notify = vmw_swap_notify
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1db1ef30be2b..a6e8f687fa64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx";
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+ void *ptr);
static void vmw_print_capabilities(uint32_t capabilities)
{
@@ -207,6 +209,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
int ret;
+ uint32_t svga_id;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
@@ -217,6 +220,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
+ dev_priv->last_read_sequence = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
rwlock_init(&dev_priv->resource_lock);
@@ -236,6 +240,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
mutex_lock(&dev_priv->hw_mutex);
+
+ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+ svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+ if (svga_id != SVGA_ID_2) {
+ ret = -ENOSYS;
+ DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
+ mutex_unlock(&dev_priv->hw_mutex);
+ goto out_err0;
+ }
+
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
if (dev_priv->capabilities & SVGA_CAP_GMR) {
@@ -351,6 +365,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
vmw_fb_init(dev_priv);
}
+ dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+ register_pm_notifier(&dev_priv->pm_nb);
+
+ DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
+
return 0;
out_no_device:
@@ -385,6 +404,8 @@ static int vmw_driver_unload(struct drm_device *dev)
DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
+ unregister_pm_notifier(&dev_priv->pm_nb);
+
if (!dev_priv->stealth) {
vmw_fb_close(dev_priv);
vmw_kms_close(dev_priv);
@@ -650,6 +671,57 @@ static void vmw_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+ void *ptr)
+{
+ struct vmw_private *dev_priv =
+ container_of(nb, struct vmw_private, pm_nb);
+ struct vmw_master *vmaster = dev_priv->active_master;
+
+ switch (val) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ttm_suspend_lock(&vmaster->lock);
+
+ /**
+ * This empties VRAM and unbinds all GMR bindings.
+ * Buffer contents are moved to swappable memory.
+ */
+ ttm_bo_swapout_all(&dev_priv->bdev);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ ttm_suspend_unlock(&vmaster->lock);
+ break;
+ case PM_RESTORE_PREPARE:
+ break;
+ case PM_POST_RESTORE:
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * These might not be needed with the virtual SVGA device.
+ */
+
+int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+int vmw_pci_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ return pci_enable_device(pdev);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -689,7 +761,9 @@ static struct drm_driver driver = {
.name = VMWGFX_DRIVER_NAME,
.id_table = vmw_pci_id_list,
.probe = vmw_probe,
- .remove = vmw_remove
+ .remove = vmw_remove,
+ .suspend = vmw_pci_suspend,
+ .resume = vmw_pci_resume
},
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e61bd85b6975..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,16 +32,17 @@
#include "drmP.h"
#include "vmwgfx_drm.h"
#include "drm_hashtab.h"
+#include "linux/suspend.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
-#define VMWGFX_DRIVER_DATE "20090724"
-#define VMWGFX_DRIVER_MAJOR 0
-#define VMWGFX_DRIVER_MINOR 1
-#define VMWGFX_DRIVER_PATCHLEVEL 2
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -95,6 +96,8 @@ struct vmw_surface {
struct drm_vmw_size *sizes;
uint32_t num_sizes;
+ bool scanout;
+
/* TODO: so far just an extra pointer */
struct vmw_cursor_snooper snooper;
};
@@ -110,6 +113,7 @@ struct vmw_fifo_state {
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
+ struct mutex fifo_mutex;
struct rw_semaphore rwsem;
};
@@ -210,7 +214,7 @@ struct vmw_private {
* Fencing and IRQs.
*/
- uint32_t fence_seq;
+ atomic_t fence_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
atomic_t fence_queue_waiters;
@@ -258,6 +262,7 @@ struct vmw_private {
struct vmw_master *active_master;
struct vmw_master fbdev_master;
+ struct notifier_block pm_nb;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -353,6 +358,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
+extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -386,6 +392,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
+extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -401,6 +408,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e92da567403..d69caf92ffe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -490,10 +490,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
return 0;
+ /**
+ * Put BO in VRAM only if there is space.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ if (unlikely(ret == -ERESTARTSYS))
+ return ret;
+
+ /**
+ * Otherwise, set it up as GMR.
+ */
+
+ if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+ return 0;
+
ret = vmw_gmr_bind(dev_priv, bo);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
+ /**
+ * If that failed, try VRAM again, this time evicting
+ * previous contents.
+ */
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
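
The validation strategy above is a three-step fallback; condensed into one sketch (the calls are exactly those of the hunks, shown without the surrounding function):

	/* 1) VRAM if there is room (system memory as the no-eviction fallback),
	 * 2) otherwise a GMR binding,
	 * 3) last resort: VRAM again, evicting previous contents. */
	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
	if (unlikely(ret == -ERESTARTSYS))
		return ret;
	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
		return 0;
	ret = vmw_gmr_bind(dev_priv, bo);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;
	return ttm_bo_validate(bo, &vmw_vram_placement, true, false);
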
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 641dde76ada1..4f4f6432be8b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -649,14 +649,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
- if (vmw_bo->gmr_bound) {
- vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
- spin_lock(&bo->glob->lru_lock);
- ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
- spin_unlock(&bo->glob->lru_lock);
- vmw_bo->gmr_bound = NULL;
- }
-
ret = ttm_bo_validate(bo, &ne_placement, false, false);
ttm_bo_unreserve(bo);
err_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 01feb48af333..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -29,6 +29,25 @@
#include "drmP.h"
#include "ttm/ttm_placement.h"
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t fifo_min, hwversion;
+
+ fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+ return false;
+
+ hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ if (hwversion == 0)
+ return false;
+
+ if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+ return false;
+
+ return true;
+}
+
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -55,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
+ mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
/*
@@ -98,8 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) min,
(unsigned int) fifo->capabilities);
- dev_priv->fence_seq = (uint32_t) -100;
- dev_priv->last_read_sequence = (uint32_t) -100;
+ atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -265,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
int ret;
- down_write(&fifo_state->rwsem);
+ mutex_lock(&fifo_state->fifo_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -333,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
}
out_err:
fifo_state->reserved_size = 0;
- up_write(&fifo_state->rwsem);
+ mutex_unlock(&fifo_state->fifo_mutex);
return NULL;
}
@@ -408,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
}
+ down_write(&fifo_state->rwsem);
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
@@ -419,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
mb();
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
up_write(&fifo_state->rwsem);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ mutex_unlock(&fifo_state->fifo_mutex);
}
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -433,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
- down_write(&fifo_state->rwsem);
- *sequence = dev_priv->fence_seq;
- up_write(&fifo_state->rwsem);
+ *sequence = atomic_read(&dev_priv->fence_seq);
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
false, 3*HZ);
@@ -443,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
}
do {
- *sequence = dev_priv->fence_seq++;
+ *sequence = atomic_add_return(1, &dev_priv->fence_seq);
} while (*sequence == 0);
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
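
Fence sequence numbers are now handed out lock-free; the allocation idiom above, isolated as a sketch:

	uint32_t seq;

	/* atomic_add_return() gives each caller a unique value; zero is
	 * skipped because it is reserved to mean "no fence". */
	do {
		seq = atomic_add_return(1, &dev_priv->fence_seq);
	} while (seq == 0);
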
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5fa6a4ed238a..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -43,11 +43,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = vmw_overlay_num_free_overlays(dev_priv);
break;
case DRM_VMW_PARAM_3D:
- param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
+ param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
break;
case DRM_VMW_PARAM_FIFO_OFFSET:
param->value = dev_priv->mmio_start;
break;
+ case DRM_VMW_PARAM_HW_CAPS:
+ param->value = dev_priv->capabilities;
+ break;
+ case DRM_VMW_PARAM_FIFO_CAPS:
+ param->value = dev_priv->fifo.capabilities;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
return true;
/**
- * Below is to signal stale fences that have wrapped.
- * First, block fence submission.
- */
-
- down_read(&fifo_state->rwsem);
-
- /**
* Check if the sequence is higher than what we've actually
* emitted; if so, the fence is stale and signaled.
*/
- ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
- up_read(&fifo_state->rwsem);
+ ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+ > VMW_FENCE_WRAP);
return ret;
}
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
if (fifo_idle)
down_read(&fifo_state->rwsem);
- signal_seq = dev_priv->fence_seq;
+ signal_seq = atomic_read(&dev_priv->fence_seq);
ret = 0;
for (;;) {
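
The staleness test now reads the sequence atomically, and the comparison itself is wrap-safe because unsigned subtraction is modular; restated as a sketch:

	/* True once emitted has advanced more than VMW_FENCE_WRAP past
	 * sequence, even across 32-bit counter wrap. */
	static bool fence_is_stale(uint32_t emitted, uint32_t sequence)
	{
		return (emitted - sequence) > VMW_FENCE_WRAP;
	}
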
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b1af76e371c3..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
} *cmd;
int i, increment = 1;
- if (!num_clips ||
- !(dev_priv->fifo.capabilities &
- SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+ if (!num_clips) {
num_clips = 1;
clips = &norect;
norect.x1 = norect.y1 = 0;
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
for (i = 0; i < num_clips; i++, clips += increment) {
cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
- cmd[i].body.x = cpu_to_le32(clips[i].x1);
- cmd[i].body.y = cpu_to_le32(clips[i].y1);
- cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1);
- cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1);
+ cmd[i].body.x = cpu_to_le32(clips->x1);
+ cmd[i].body.y = cpu_to_le32(clips->y1);
+ cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
+ cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
}
vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
@@ -709,6 +707,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret)
goto try_dmabuf;
+ if (!surface->scanout)
+ goto err_not_scanout;
+
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd->width, mode_cmd->height);
@@ -742,6 +743,13 @@ try_dmabuf:
}
return &vfb->base;
+
+err_not_scanout:
+ DRM_ERROR("surface not marked as scanout\n");
+ /* vmw_user_surface_lookup takes one ref */
+ vmw_surface_unreference(&surface);
+
+ return NULL;
}
static int vmw_kms_fb_changed(struct drm_device *dev)
@@ -761,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
drm_mode_config_init(dev);
dev->mode_config.funcs = &vmw_kms_funcs;
- dev->mode_config.min_width = 640;
- dev->mode_config.min_height = 480;
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.min_width = 1;
+ dev->mode_config.min_height = 1;
+ dev->mode_config.max_width = dev_priv->fb_max_width;
+ dev->mode_config.max_height = dev_priv->fb_max_height;
ret = vmw_kms_init_legacy_display_system(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index bb6e6a096d25..5b6eabeb7f51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
bool pin, bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_placement *overlay_placement = &vmw_vram_placement;
int ret;
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- if (buf->gmr_bound) {
- vmw_gmr_unbind(dev_priv, buf->gmr_id);
- spin_lock(&glob->lru_lock);
- ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
- spin_unlock(&glob->lru_lock);
- buf->gmr_bound = NULL;
- }
-
if (pin)
overlay_placement = &vmw_vram_ne_placement;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c012d5927f65..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->flags = req->flags;
srf->format = req->format;
+ srf->scanout = req->scanout;
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -599,6 +600,26 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_err1;
+ if (srf->scanout &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+ srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+ /* clear the image */
+ if (srf->snooper.image) {
+ memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+ } else {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
user_srf->base.shareable = false;
user_srf->base.tfile = NULL;
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return ret;
}
- if (srf->flags & (1 << 9) &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
-
- srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
- /* clear the image */
- if (srf->snooper.image)
- memset(srf->snooper.image, 0x00, 64 * 64 * 4);
- else
- DRM_ERROR("Failed to allocate cursor_image\n");
-
- } else {
- srf->snooper.image = NULL;
- }
- srf->snooper.crtc = NULL;
-
rep->sid = user_srf->base.hash.key;
if (rep->sid == SVGA3D_INVALID_ID)
DRM_ERROR("Created bad Surface ID.\n");
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
return bo_user_size + page_array_size;
}
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
struct ttm_bo_global *glob = bo->glob;
struct vmw_private *dev_priv =
container_of(bo->bdev, struct vmw_private, bdev);
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
if (vmw_bo->gmr_bound) {
vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
spin_lock(&glob->lru_lock);
ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
spin_unlock(&glob->lru_lock);
+ vmw_bo->gmr_bound = false;
}
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+
+ vmw_dmabuf_gmr_unbind(bo);
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo);
}
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
struct ttm_bo_global *glob = bo->glob;
- struct vmw_private *dev_priv =
- container_of(bo->bdev, struct vmw_private, bdev);
+ vmw_dmabuf_gmr_unbind(bo);
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
- if (vmw_bo->gmr_bound) {
- vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
- spin_lock(&glob->lru_lock);
- ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
- spin_unlock(&glob->lru_lock);
- }
kfree(vmw_user_bo);
}
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
}
ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
- &vmw_vram_placement, true,
+ &vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
return ret;