Diffstat (limited to 'drivers/gpu')
217 files changed, 23248 insertions, 9319 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 305c59003963..88910e5a2c77 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,6 +9,7 @@ menuconfig DRM depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU select I2C select I2C_ALGOBIT + select SLOW_WORK help Kernel-level support for the Direct Rendering Infrastructure (DRI) introduced in XFree86 4.0. If you say Y here, you need to select @@ -59,6 +60,7 @@ config DRM_RADEON select FW_LOADER select DRM_KMS_HELPER select DRM_TTM + select POWER_SUPPLY help Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. You don't need to choose this to diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index d68888fe3df9..ba38e0147220 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -33,6 +33,7 @@ #include "drmP.h" #include <linux/module.h> +#include <linux/slab.h> #if __OS_HAS_AGP diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 932b5aa96a67..3f46772f0cb2 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, struct drm_device *dev = master->minor->dev; DRM_DEBUG("%d\n", magic); - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; - memset(entry, 0, sizeof(*entry)); entry->priv = priv; entry->hash_item.key = (unsigned long)magic; mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 8417cc4c43f1..2092e7bb788f 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -34,6 +34,7 @@ */ #include <linux/vmalloc.h> +#include <linux/slab.h> #include <linux/log2.h> #include <asm/shmparam.h> #include "drmP.h" @@ -960,7 +961,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } - /* No allocations failed, so now we can replace the orginal pagelist + /* No allocations failed, so now we can replace the original pagelist * with the new one. 
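The conversions in drm_auth.c here (and in drm_dma.c further down) fold a kmalloc()+memset() pair into a single kzalloc() call, which returns already-zeroed memory. A minimal sketch of the equivalence, in kernel context:

        /* Before: allocate, then zero by hand. */
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
        memset(entry, 0, sizeof(*entry));

        /* After: kzalloc() is kmalloc() with __GFP_ZERO implied. */
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;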
*/ if (dma->page_count) { diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d91fb8c0b7b3..57cea01c4ffb 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -30,9 +30,11 @@ * Jesse Barnes <jesse.barnes@intel.com> */ #include <linux/list.h> +#include <linux/slab.h> #include "drm.h" #include "drmP.h" #include "drm_crtc.h" +#include "drm_edid.h" struct drm_prop_enum_list { int type; @@ -493,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector) list_for_each_entry_safe(mode, t, &connector->user_modes, head) drm_mode_remove(connector, mode); - kfree(connector->fb_helper_private); mutex_lock(&dev->mode_config.mutex); drm_mode_object_put(dev, &connector->base); list_del(&connector->head); @@ -857,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev) mutex_init(&dev->mode_config.mutex); mutex_init(&dev->mode_config.idr_mutex); INIT_LIST_HEAD(&dev->mode_config.fb_list); - INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); @@ -1840,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); - if (ret) + if (ret) { + ret = -EFAULT; goto out_err2; + } } if (fb->funcs->dirty) { @@ -2349,7 +2351,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct edid *edid) { struct drm_device *dev = connector->dev; - int ret = 0; + int ret = 0, size; if (connector->edid_blob_ptr) drm_property_destroy_blob(dev, connector->edid_blob_ptr); @@ -2361,7 +2363,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, return ret; } - connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid); + size = EDID_LENGTH * (1 + edid->extensions); + connector->edid_blob_ptr = drm_property_create_blob(connector->dev, + size, edid); ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index f2aaf39be398..9b2a54117c91 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector, } /** - * drm_helper_probe_connector_modes - get complete set of display modes + * drm_helper_probe_single_connector_modes - get complete set of display modes * @dev: DRM device * @maxX: max width for modes * @maxY: max height for modes @@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("%s is disconnected\n", drm_get_connector_name(connector)); + drm_mode_connector_update_edid_property(connector, NULL); goto prune; } @@ -153,21 +154,6 @@ prune: } EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); -int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, - uint32_t maxY) -{ - struct drm_connector *connector; - int count = 0; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - count += drm_helper_probe_single_connector_modes(connector, - maxX, maxY); - } - - return count; -} -EXPORT_SYMBOL(drm_helper_probe_connector_modes); - /** * drm_helper_encoder_in_use - check if a given encoder is in use * @encoder: encoder to check @@ -262,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) } 
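Note that drm_mode_connector_update_edid_property() now sizes the blob from the full EDID record rather than a hard-coded 128 bytes: one base block plus one per extension, with the extension count stored in byte 0x7e of the base block. A self-contained sketch of that size computation (helper name is illustrative, not from the patch):

        #include <stddef.h>
        #include <stdint.h>

        #define EDID_LENGTH 128

        /* Total length of an EDID record: base block + extension blocks. */
        static size_t edid_total_length(const uint8_t *edid)
        {
                return (size_t)(1 + edid[0x7e]) * EDID_LENGTH;
        }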
EXPORT_SYMBOL(drm_helper_disable_unused_functions); -static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height) -{ - struct drm_display_mode *mode; - - list_for_each_entry(mode, &connector->modes, head) { - if (drm_mode_width(mode) > width || - drm_mode_height(mode) > height) - continue; - if (mode->type & DRM_MODE_TYPE_PREFERRED) - return mode; - } - return NULL; -} - -static bool drm_has_cmdline_mode(struct drm_connector *connector) -{ - struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; - struct drm_fb_helper_cmdline_mode *cmdline_mode; - - if (!fb_help_conn) - return false; - - cmdline_mode = &fb_help_conn->cmdline_mode; - return cmdline_mode->specified; -} - -static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height) -{ - struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; - struct drm_fb_helper_cmdline_mode *cmdline_mode; - struct drm_display_mode *mode = NULL; - - if (!fb_help_conn) - return mode; - - cmdline_mode = &fb_help_conn->cmdline_mode; - if (cmdline_mode->specified == false) - return mode; - - /* attempt to find a matching mode in the list of modes - * we have gotten so far, if not add a CVT mode that conforms - */ - if (cmdline_mode->rb || cmdline_mode->margins) - goto create_mode; - - list_for_each_entry(mode, &connector->modes, head) { - /* check width/height */ - if (mode->hdisplay != cmdline_mode->xres || - mode->vdisplay != cmdline_mode->yres) - continue; - - if (cmdline_mode->refresh_specified) { - if (mode->vrefresh != cmdline_mode->refresh) - continue; - } - - if (cmdline_mode->interlace) { - if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) - continue; - } - return mode; - } - -create_mode: - mode = drm_cvt_mode(connector->dev, cmdline_mode->xres, - cmdline_mode->yres, - cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, - cmdline_mode->rb, cmdline_mode->interlace, - cmdline_mode->margins); - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); - list_add(&mode->head, &connector->modes); - return mode; -} - -static bool drm_connector_enabled(struct drm_connector *connector, bool strict) -{ - bool enable; - - if (strict) { - enable = connector->status == connector_status_connected; - } else { - enable = connector->status != connector_status_disconnected; - } - return enable; -} - -static void drm_enable_connectors(struct drm_device *dev, bool *enabled) -{ - bool any_enabled = false; - struct drm_connector *connector; - int i = 0; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - enabled[i] = drm_connector_enabled(connector, true); - DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, - enabled[i] ? 
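The drm_connector_enabled() being removed above encodes a two-speed policy: a strict pass that only counts connectors positively reported as connected, and a relaxed pass that also admits "unknown" status. drm_enable_connectors() runs the strict pass first and falls back to the relaxed one when nothing qualified, so a machine whose connectors all report unknown still lights something up. The predicate, as a standalone sketch (enum names are illustrative):

        #include <stdbool.h>

        enum status { DISCONNECTED, UNKNOWN, CONNECTED };

        static bool connector_enabled(enum status s, bool strict)
        {
                return strict ? (s == CONNECTED)        /* must be proven present */
                              : (s != DISCONNECTED);    /* benefit of the doubt */
        }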
"yes" : "no"); - any_enabled |= enabled[i]; - i++; - } - - if (any_enabled) - return; - - i = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - enabled[i] = drm_connector_enabled(connector, false); - i++; - } -} - -static bool drm_target_preferred(struct drm_device *dev, - struct drm_display_mode **modes, - bool *enabled, int width, int height) -{ - struct drm_connector *connector; - int i = 0; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - - if (enabled[i] == false) { - i++; - continue; - } - - DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", - connector->base.id); - - /* got for command line mode first */ - modes[i] = drm_pick_cmdline_mode(connector, width, height); - if (!modes[i]) { - DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", - connector->base.id); - modes[i] = drm_has_preferred_mode(connector, width, height); - } - /* No preferred modes, pick one off the list */ - if (!modes[i] && !list_empty(&connector->modes)) { - list_for_each_entry(modes[i], &connector->modes, head) - break; - } - DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : - "none"); - i++; - } - return true; -} - -static int drm_pick_crtcs(struct drm_device *dev, - struct drm_crtc **best_crtcs, - struct drm_display_mode **modes, - int n, int width, int height) -{ - int c, o; - struct drm_connector *connector; - struct drm_connector_helper_funcs *connector_funcs; - struct drm_encoder *encoder; - struct drm_crtc *best_crtc; - int my_score, best_score, score; - struct drm_crtc **crtcs, *crtc; - - if (n == dev->mode_config.num_connector) - return 0; - c = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (c == n) - break; - c++; - } - - best_crtcs[n] = NULL; - best_crtc = NULL; - best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height); - if (modes[n] == NULL) - return best_score; - - crtcs = kmalloc(dev->mode_config.num_connector * - sizeof(struct drm_crtc *), GFP_KERNEL); - if (!crtcs) - return best_score; - - my_score = 1; - if (connector->status == connector_status_connected) - my_score++; - if (drm_has_cmdline_mode(connector)) - my_score++; - if (drm_has_preferred_mode(connector, width, height)) - my_score++; - - connector_funcs = connector->helper_private; - encoder = connector_funcs->best_encoder(connector); - if (!encoder) - goto out; - - connector->encoder = encoder; - - /* select a crtc for this connector and then attempt to configure - remaining connectors */ - c = 0; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - - if ((encoder->possible_crtcs & (1 << c)) == 0) { - c++; - continue; - } - - for (o = 0; o < n; o++) - if (best_crtcs[o] == crtc) - break; - - if (o < n) { - /* ignore cloning for now */ - c++; - continue; - } - - crtcs[n] = crtc; - memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *)); - score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1, - width, height); - if (score > best_score) { - best_crtc = crtc; - best_score = score; - memcpy(best_crtcs, crtcs, - dev->mode_config.num_connector * - sizeof(struct drm_crtc *)); - } - c++; - } -out: - kfree(crtcs); - return best_score; -} - -static void drm_setup_crtcs(struct drm_device *dev) -{ - struct drm_crtc **crtcs; - struct drm_display_mode **modes; - struct drm_encoder *encoder; - struct drm_connector *connector; - bool *enabled; - int width, height; - int i, ret; - - DRM_DEBUG_KMS("\n"); - - width = dev->mode_config.max_width; - height = dev->mode_config.max_height; - - /* clean 
out all the encoder/crtc combos */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - encoder->crtc = NULL; - } - - crtcs = kcalloc(dev->mode_config.num_connector, - sizeof(struct drm_crtc *), GFP_KERNEL); - modes = kcalloc(dev->mode_config.num_connector, - sizeof(struct drm_display_mode *), GFP_KERNEL); - enabled = kcalloc(dev->mode_config.num_connector, - sizeof(bool), GFP_KERNEL); - - drm_enable_connectors(dev, enabled); - - ret = drm_target_preferred(dev, modes, enabled, width, height); - if (!ret) - DRM_ERROR("Unable to find initial modes\n"); - - DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); - - drm_pick_crtcs(dev, crtcs, modes, 0, width, height); - - i = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct drm_display_mode *mode = modes[i]; - struct drm_crtc *crtc = crtcs[i]; - - if (connector->encoder == NULL) { - i++; - continue; - } - - if (mode && crtc) { - DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", - mode->name, crtc->base.id); - crtc->desired_mode = mode; - connector->encoder->crtc = crtc; - } else { - connector->encoder->crtc = NULL; - connector->encoder = NULL; - } - i++; - } - - kfree(crtcs); - kfree(modes); - kfree(enabled); -} - /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? * @encoder: encoder to test @@ -935,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) ret = -EINVAL; goto fail; } - /* TODO are these needed? */ - set->crtc->desired_x = set->x; - set->crtc->desired_y = set->y; - set->crtc->desired_mode = set->mode; } drm_helper_disable_unused_functions(dev); } else if (fb_changed) { @@ -983,63 +669,6 @@ fail: } EXPORT_SYMBOL(drm_crtc_helper_set_config); -bool drm_helper_plugged_event(struct drm_device *dev) -{ - DRM_DEBUG_KMS("\n"); - - drm_helper_probe_connector_modes(dev, dev->mode_config.max_width, - dev->mode_config.max_height); - - drm_setup_crtcs(dev); - - /* alert the driver fb layer */ - dev->mode_config.funcs->fb_changed(dev); - - /* FIXME: send hotplug event */ - return true; -} -/** - * drm_initial_config - setup a sane initial connector configuration - * @dev: DRM device - * - * LOCKING: - * Called at init time, must take mode config lock. - * - * Scan the CRTCs and connectors and try to put together an initial setup. - * At the moment, this is a cloned configuration across all heads with - * a new framebuffer object as the backing store. - * - * RETURNS: - * Zero if everything went ok, nonzero otherwise. - */ -bool drm_helper_initial_config(struct drm_device *dev) -{ - int count = 0; - - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - - drm_fb_helper_parse_command_line(dev); - - count = drm_helper_probe_connector_modes(dev, - dev->mode_config.max_width, - dev->mode_config.max_height); - - /* - * we shouldn't end up with no modes here. - */ - if (count == 0) - printk(KERN_INFO "No connectors reported connected with modes\n"); - - drm_setup_crtcs(dev); - - /* alert the driver fb layer */ - dev->mode_config.funcs->fb_changed(dev); - - return 0; -} -EXPORT_SYMBOL(drm_helper_initial_config); - static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) { int dpms = DRM_MODE_DPMS_OFF; @@ -1122,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) } EXPORT_SYMBOL(drm_helper_connector_dpms); -/** - * drm_hotplug_stage_two - * @dev DRM device - * @connector hotpluged connector - * - * LOCKING. 
- * Caller must hold mode config lock, function might grab struct lock. - * - * Stage two of a hotplug. - * - * RETURNS: - * Zero on success, errno on failure. - */ -int drm_helper_hotplug_stage_two(struct drm_device *dev) -{ - drm_helper_plugged_event(dev); - - return 0; -} -EXPORT_SYMBOL(drm_helper_hotplug_stage_two); - int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, struct drm_mode_fb_cmd *mode_cmd) { @@ -1199,3 +807,114 @@ int drm_helper_resume_force_mode(struct drm_device *dev) return 0; } EXPORT_SYMBOL(drm_helper_resume_force_mode); + +static struct slow_work_ops output_poll_ops; + +#define DRM_OUTPUT_POLL_PERIOD (10*HZ) +static void output_poll_execute(struct slow_work *work) +{ + struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); + struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); + struct drm_connector *connector; + enum drm_connector_status old_status, status; + bool repoll = false, changed = false; + int ret; + + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + + /* if this is HPD or polled don't check it - + TV out for instance */ + if (!connector->polled) + continue; + + else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) + repoll = true; + + old_status = connector->status; + /* if we are connected and don't want to poll for disconnect + skip it */ + if (old_status == connector_status_connected && + !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && + !(connector->polled & DRM_CONNECTOR_POLL_HPD)) + continue; + + status = connector->funcs->detect(connector); + if (old_status != status) + changed = true; + } + + mutex_unlock(&dev->mode_config.mutex); + + if (changed) { + /* send a uevent + call fbdev */ + drm_sysfs_hotplug_event(dev); + if (dev->mode_config.funcs->output_poll_changed) + dev->mode_config.funcs->output_poll_changed(dev); + } + + if (repoll) { + ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); + if (ret) + DRM_ERROR("delayed enqueue failed %d\n", ret); + } +} + +void drm_kms_helper_poll_disable(struct drm_device *dev) +{ + if (!dev->mode_config.poll_enabled) + return; + delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); +} +EXPORT_SYMBOL(drm_kms_helper_poll_disable); + +void drm_kms_helper_poll_enable(struct drm_device *dev) +{ + bool poll = false; + struct drm_connector *connector; + int ret; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->polled) + poll = true; + } + + if (poll) { + ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); + if (ret) + DRM_ERROR("delayed enqueue failed %d\n", ret); + } +} +EXPORT_SYMBOL(drm_kms_helper_poll_enable); + +void drm_kms_helper_poll_init(struct drm_device *dev) +{ + slow_work_register_user(THIS_MODULE); + delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, + &output_poll_ops); + dev->mode_config.poll_enabled = true; + + drm_kms_helper_poll_enable(dev); +} +EXPORT_SYMBOL(drm_kms_helper_poll_init); + +void drm_kms_helper_poll_fini(struct drm_device *dev) +{ + drm_kms_helper_poll_disable(dev); + slow_work_unregister_user(THIS_MODULE); +} +EXPORT_SYMBOL(drm_kms_helper_poll_fini); + +void drm_helper_hpd_irq_event(struct drm_device *dev) +{ + if (!dev->mode_config.poll_enabled) + return; + delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); + /* schedule a 
slow work asap */ + delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); +} +EXPORT_SYMBOL(drm_helper_hpd_irq_event); + +static struct slow_work_ops output_poll_ops = { + .execute = output_poll_execute, +}; diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 9903f270e440..677b275fa721 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -32,6 +32,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/slab.h> #include "drmP.h" #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index 13f1537413fb..252cbd74df0e 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev) { int i; - dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL); + dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); if (!dev->dma) return -ENOMEM; - memset(dev->dma, 0, sizeof(*dev->dma)); - for (i = 0; i <= DRM_MAX_ORDER; i++) memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c index 548887c8506f..f7eba0a0973a 100644 --- a/drivers/gpu/drm/drm_dp_i2c_helper.c +++ b/drivers/gpu/drm/drm_dp_i2c_helper.c @@ -23,7 +23,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index f3c58e2bd75c..4a66201edaec 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -47,6 +47,7 @@ */ #include <linux/debugfs.h> +#include <linux/slab.h> #include "drmP.h" #include "drm_core.h" diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f97e7c42ac8e..9585e531ac6b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2,6 +2,7 @@ * Copyright (c) 2006 Luc Verhaegen (quirks list) * Copyright (c) 2007-2008 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> + * Copyright 2010 Red Hat, Inc. * * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from * FB layer. @@ -27,15 +28,15 @@ * DEALINGS IN THE SOFTWARE. */ #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include "drmP.h" #include "drm_edid.h" -/* - * TODO: - * - support EDID 1.4 (incl. CE blocks) - */ +#define EDID_EST_TIMINGS 16 +#define EDID_STD_TIMINGS 8 +#define EDID_DETAILED_TIMINGS 4 /* * EDID blocks out in the wild have a variety of bugs, try to collect @@ -64,7 +65,8 @@ #define LEVEL_DMT 0 #define LEVEL_GTF 1 -#define LEVEL_CVT 2 +#define LEVEL_GTF2 2 +#define LEVEL_CVT 3 static struct edid_quirk { char *vendor; @@ -84,6 +86,8 @@ static struct edid_quirk { /* Envision Peripherals, Inc. EN-7100e */ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, + /* Envision EN2028 */ + { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, /* Funai Electronics PM36B */ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | @@ -106,51 +110,64 @@ static struct edid_quirk { { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, }; +/*** DDC fetch and block validation ***/ -/* Valid EDID header has these bytes */ static const u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; -/** - * drm_edid_is_valid - sanity check EDID data - * @edid: EDID data - * - * Sanity check the EDID block by looking at the header, the version number - * and the checksum. 
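The polling helpers above replace explicit hotplug plumbing: a delayed slow-work item re-runs detect() on every pollable connector each DRM_OUTPUT_POLL_PERIOD (10 s), fires a uevent only when some status actually changed, and re-queues itself while any connector still carries connect/disconnect polling flags. The core decision logic as a standalone sketch (types and the detect() stub are illustrative; the real helper also distinguishes HPD-assisted from fully polled connectors):

        #include <stdbool.h>

        enum status { DISCONNECTED, UNKNOWN, CONNECTED };

        struct connector {
                unsigned polled;        /* 0 = pure HPD, no polling needed */
                enum status status;     /* last known state */
        };

        static enum status detect(struct connector *c) { return c->status; } /* stub */

        /* One poll pass: returns true if a hotplug uevent should be sent. */
        static bool poll_pass(struct connector *c, int n, bool *repoll)
        {
                bool changed = false;

                *repoll = false;
                for (int i = 0; i < n; i++) {
                        enum status old;

                        if (!c[i].polled)
                                continue;               /* HPD covers this one */
                        *repoll = true;                 /* keep the timer armed */
                        old = c[i].status;
                        c[i].status = detect(&c[i]);
                        if (c[i].status != old)
                                changed = true;
                }
                return changed;
        }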
Return 0 if the EDID doesn't check out, or 1 if it's - * valid. +/* + * Sanity check the EDID block (base or extension). Return 0 if the block + * doesn't check out, or 1 if it's valid. */ -bool drm_edid_is_valid(struct edid *edid) +static bool +drm_edid_block_valid(u8 *raw_edid) { - int i, score = 0; + int i; u8 csum = 0; - u8 *raw_edid = (u8 *)edid; + struct edid *edid = (struct edid *)raw_edid; + + if (raw_edid[0] == 0x00) { + int score = 0; - for (i = 0; i < sizeof(edid_header); i++) - if (raw_edid[i] == edid_header[i]) - score++; + for (i = 0; i < sizeof(edid_header); i++) + if (raw_edid[i] == edid_header[i]) + score++; - if (score == 8) ; - else if (score >= 6) { - DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); - memcpy(raw_edid, edid_header, sizeof(edid_header)); - } else - goto bad; + if (score == 8) ; + else if (score >= 6) { + DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); + memcpy(raw_edid, edid_header, sizeof(edid_header)); + } else { + goto bad; + } + } for (i = 0; i < EDID_LENGTH; i++) csum += raw_edid[i]; if (csum) { DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); - goto bad; - } - if (edid->version != 1) { - DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); - goto bad; + /* allow CEA to slide through, switches mangle this */ + if (raw_edid[0] != 0x02) + goto bad; } - if (edid->revision > 4) - DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); + /* per-block-type checks */ + switch (raw_edid[0]) { + case 0: /* base */ + if (edid->version != 1) { + DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); + goto bad; + } + + if (edid->revision > 4) + DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); + break; + + default: + break; + } return 1; @@ -162,8 +179,158 @@ bad: } return 0; } + +/** + * drm_edid_is_valid - sanity check EDID data + * @edid: EDID data + * + * Sanity-check an entire EDID record (including extensions) + */ +bool drm_edid_is_valid(struct edid *edid) +{ + int i; + u8 *raw = (u8 *)edid; + + if (!edid) + return false; + + for (i = 0; i <= edid->extensions; i++) + if (!drm_edid_block_valid(raw + i * EDID_LENGTH)) + return false; + + return true; +} EXPORT_SYMBOL(drm_edid_is_valid); +#define DDC_ADDR 0x50 +#define DDC_SEGMENT_ADDR 0x30 +/** + * Get EDID information via I2C. + * + * \param adapter : i2c device adaptor + * \param buf : EDID data buffer to be filled + * \param len : EDID data buffer length + * \return 0 on success or -1 on failure. + * + * Try to fetch EDID information by calling i2c driver function. 
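drm_edid_block_valid() reduces to two rules: the base block must open with the fixed 8-byte header signature (up to two corrupt header bytes are tolerated and repaired in place), and every 128-byte block, base or extension, must sum to zero mod 256. The checksum rule as a standalone function:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        #define EDID_LENGTH 128

        static bool edid_block_checksum_ok(const uint8_t *block)
        {
                uint8_t sum = 0;

                for (size_t i = 0; i < EDID_LENGTH; i++)
                        sum += block[i];   /* byte 127 is chosen so this wraps to 0 */
                return sum == 0;
        }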
+ */ +static int +drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, + int block, int len) +{ + unsigned char start = block * EDID_LENGTH; + struct i2c_msg msgs[] = { + { + .addr = DDC_ADDR, + .flags = 0, + .len = 1, + .buf = &start, + }, { + .addr = DDC_ADDR, + .flags = I2C_M_RD, + .len = len, + .buf = buf + start, + } + }; + + if (i2c_transfer(adapter, msgs, 2) == 2) + return 0; + + return -1; +} + +static u8 * +drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) +{ + int i, j = 0; + u8 *block, *new; + + if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) + return NULL; + + /* base block fetch */ + for (i = 0; i < 4; i++) { + if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) + goto out; + if (drm_edid_block_valid(block)) + break; + } + if (i == 4) + goto carp; + + /* if there's no extensions, we're done */ + if (block[0x7e] == 0) + return block; + + new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); + if (!new) + goto out; + block = new; + + for (j = 1; j <= block[0x7e]; j++) { + for (i = 0; i < 4; i++) { + if (drm_do_probe_ddc_edid(adapter, block, j, + EDID_LENGTH)) + goto out; + if (drm_edid_block_valid(block + j * EDID_LENGTH)) + break; + } + if (i == 4) + goto carp; + } + + return block; + +carp: + dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n", + drm_get_connector_name(connector), j); + +out: + kfree(block); + return NULL; +} + +/** + * Probe DDC presence. + * + * \param adapter : i2c device adaptor + * \return 1 on success + */ +static bool +drm_probe_ddc(struct i2c_adapter *adapter) +{ + unsigned char out; + + return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); +} + +/** + * drm_get_edid - get EDID data, if available + * @connector: connector we're probing + * @adapter: i2c adapter to use for DDC + * + * Poke the given i2c channel to grab EDID data if possible. If found, + * attach it to the connector. + * + * Return edid data or NULL if we couldn't find any. 
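drm_do_probe_ddc_edid() now takes a block number: it writes the one-byte offset (block * 128) to DDC address 0x50, then reads the block back within the same transaction. For experimentation, the equivalent transaction can be issued from userspace through /dev/i2c-* (device path, adapter number, and error handling are illustrative):

        #include <fcntl.h>
        #include <linux/i2c.h>
        #include <linux/i2c-dev.h>
        #include <stdint.h>
        #include <sys/ioctl.h>

        #define DDC_ADDR    0x50
        #define EDID_LENGTH 128

        /* NB: blocks >= 2 need the 0x30 segment pointer; not handled here. */
        static int read_edid_block(int fd, uint8_t block, uint8_t *buf)
        {
                uint8_t start = block * EDID_LENGTH;
                struct i2c_msg msgs[2] = {
                        { .addr = DDC_ADDR, .flags = 0,        .len = 1,           .buf = &start },
                        { .addr = DDC_ADDR, .flags = I2C_M_RD, .len = EDID_LENGTH, .buf = buf },
                };
                struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

                return ioctl(fd, I2C_RDWR, &xfer) == 2 ? 0 : -1;
        }

        /* Usage sketch: fd = open("/dev/i2c-1", O_RDWR); read_edid_block(fd, 0, buf); */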
+ */ +struct edid *drm_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid = NULL; + + if (drm_probe_ddc(adapter)) + edid = (struct edid *)drm_do_get_edid(connector, adapter); + + connector->display_info.raw_edid = (char *)edid; + + return edid; + +} +EXPORT_SYMBOL(drm_get_edid); + +/*** EDID parsing ***/ + /** * edid_vendor - match a string against EDID's obfuscated vendor field * @edid: EDID to match @@ -332,7 +499,7 @@ static struct drm_display_mode drm_dmt_modes[] = { DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@85Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, - 1072, 1376, 0, 768, 769, 772, 808, 0, + 1168, 1376, 0, 768, 769, 772, 808, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, @@ -423,7 +590,7 @@ static struct drm_display_mode drm_dmt_modes[] = { 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@75Hz */ - { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664, + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@85Hz */ @@ -494,8 +661,8 @@ static struct drm_display_mode drm_dmt_modes[] = { static const int drm_num_dmt_modes = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); -static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, - int hsize, int vsize, int fresh) +struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, + int hsize, int vsize, int fresh) { int i; struct drm_display_mode *ptr, *mode; @@ -513,6 +680,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, } return mode; } +EXPORT_SYMBOL(drm_mode_find_dmt); + +typedef void detailed_cb(struct detailed_timing *timing, void *closure); + +static void +drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) +{ + int i; + struct edid *edid = (struct edid *)raw_edid; + + if (edid == NULL) + return; + + for (i = 0; i < EDID_DETAILED_TIMINGS; i++) + cb(&(edid->detailed_timings[i]), closure); + + /* XXX extension block walk */ +} + +static void +is_rb(struct detailed_timing *t, void *data) +{ + u8 *r = (u8 *)t; + if (r[3] == EDID_DETAIL_MONITOR_RANGE) + if (r[15] & 0x10) + *(bool *)data = true; +} + +/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ +static bool +drm_monitor_supports_rb(struct edid *edid) +{ + if (edid->revision >= 4) { + bool ret; + drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); + return ret; + } + + return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); +} + +static void +find_gtf2(struct detailed_timing *t, void *data) +{ + u8 *r = (u8 *)t; + if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) + *(u8 **)data = r; +} + +/* Secondary GTF curve kicks in above some break frequency */ +static int +drm_gtf2_hbreak(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? (r[12] * 2) : 0; +} + +static int +drm_gtf2_2c(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? r[13] : 0; +} + +static int +drm_gtf2_m(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? 
(r[15] << 8) + r[14] : 0; +} + +static int +drm_gtf2_k(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? r[16] : 0; +} + +static int +drm_gtf2_2j(struct edid *edid) +{ + u8 *r = NULL; + drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); + return r ? r[17] : 0; +} + +/** + * standard_timing_level - get std. timing level(CVT/GTF/DMT) + * @edid: EDID block to scan + */ +static int standard_timing_level(struct edid *edid) +{ + if (edid->revision >= 2) { + if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) + return LEVEL_CVT; + if (drm_gtf2_hbreak(edid)) + return LEVEL_GTF2; + return LEVEL_GTF; + } + return LEVEL_DMT; +} /* * 0 is reserved. The spec says 0x01 fill for unused timings. Some old @@ -533,22 +805,20 @@ bad_std_timing(u8 a, u8 b) * * Take the standard timing params (in this case width, aspect, and refresh) * and convert them into a real mode using CVT/GTF/DMT. - * - * Punts for now, but should eventually use the FB layer's CVT based mode - * generation code. */ -struct drm_display_mode *drm_mode_std(struct drm_device *dev, - struct std_timing *t, - int revision, - int timing_level) +static struct drm_display_mode * +drm_mode_std(struct drm_connector *connector, struct edid *edid, + struct std_timing *t, int revision) { - struct drm_display_mode *mode; + struct drm_device *dev = connector->dev; + struct drm_display_mode *m, *mode = NULL; int hsize, vsize; int vrefresh_rate; unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) >> EDID_TIMING_ASPECT_SHIFT; unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) >> EDID_TIMING_VFREQ_SHIFT; + int timing_level = standard_timing_level(edid); if (bad_std_timing(t->hsize, t->vfreq_aspect)) return NULL; @@ -569,18 +839,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, vsize = (hsize * 4) / 5; else vsize = (hsize * 9) / 16; - /* HDTV hack */ - if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { - mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, + + /* HDTV hack, part 1 */ + if (vrefresh_rate == 60 && + ((hsize == 1360 && vsize == 765) || + (hsize == 1368 && vsize == 769))) { + hsize = 1366; + vsize = 768; + } + + /* + * If this connector already has a mode for this size and refresh + * rate (because it came from detailed or CVT info), use that + * instead. This way we don't have to guess at interlace or + * reduced blanking. + */ + list_for_each_entry(m, &connector->probed_modes, head) + if (m->hdisplay == hsize && m->vdisplay == vsize && + drm_mode_vrefresh(m) == vrefresh_rate) + return NULL; + + /* HDTV hack, part 2 */ + if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { + mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, false); mode->hdisplay = 1366; - mode->vsync_start = mode->vsync_start - 1; - mode->vsync_end = mode->vsync_end - 1; + mode->hsync_start = mode->hsync_start - 1; + mode->hsync_end = mode->hsync_end - 1; return mode; } - mode = NULL; + /* check whether it can be found in default mode table */ - mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate); + mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate); if (mode) return mode; @@ -590,6 +880,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, case LEVEL_GTF: mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); break; + case LEVEL_GTF2: + /* + * This is potentially wrong if there's ever a monitor with + * more than one ranges section, each claiming a different + * secondary GTF curve. 
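The drm_gtf2_*() accessors above pull the secondary-curve parameters straight out of a range-limits descriptor whose byte 10 is 0x02: byte 12 holds the break frequency in 2 kHz units, byte 13 is C*2, bytes 14/15 are M little-endian, byte 16 is K, and byte 17 is J*2. Decoded in one place, as a standalone sketch:

        #include <stdint.h>

        struct gtf2_params {
                int hbreak_khz;         /* secondary curve applies above this hsync */
                int c2, m, k, j2;       /* curve coefficients: C*2, M, K, J*2 */
        };

        /* r points at an 18-byte range-limits descriptor with r[10] == 0x02. */
        static struct gtf2_params decode_gtf2(const uint8_t *r)
        {
                return (struct gtf2_params){
                        .hbreak_khz = r[12] * 2,
                        .c2         = r[13],
                        .m          = (r[15] << 8) + r[14],     /* little-endian */
                        .k          = r[16],
                        .j2         = r[17],
                };
        }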
Please don't do that. + */ + mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); + if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { + kfree(mode); + mode = drm_gtf_mode_complex(dev, hsize, vsize, + vrefresh_rate, 0, 0, + drm_gtf2_m(edid), + drm_gtf2_2c(edid), + drm_gtf2_k(edid), + drm_gtf2_2j(edid)); + } + break; case LEVEL_CVT: mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, false); @@ -622,13 +929,11 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode, { 1440, 576 }, { 2880, 576 }, }; - static const int n_sizes = - sizeof(cea_interlaced)/sizeof(cea_interlaced[0]); if (!(pt->misc & DRM_EDID_PT_INTERLACED)) return; - for (i = 0; i < n_sizes; i++) { + for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) { if ((mode->hdisplay == cea_interlaced[i].w) && (mode->vdisplay == cea_interlaced[i].h / 2)) { mode->vdisplay *= 2; @@ -707,25 +1012,16 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->vsync_end = mode->vsync_start + vsync_pulse_width; mode->vtotal = mode->vdisplay + vblank; - /* perform the basic check for the detailed timing */ - if (mode->hsync_end > mode->htotal || - mode->vsync_end > mode->vtotal) { - drm_mode_destroy(dev, mode); - DRM_DEBUG_KMS("Incorrect detailed timing. " - "Sync is beyond the blank.\n"); - return NULL; - } - /* Some EDIDs have bogus h/vtotal values */ if (mode->hsync_end > mode->htotal) mode->htotal = mode->hsync_end + 1; if (mode->vsync_end > mode->vtotal) mode->vtotal = mode->vsync_end + 1; - drm_mode_set_name(mode); - drm_mode_do_interlace_quirk(mode, pt); + drm_mode_set_name(mode); + if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; } @@ -808,10 +1104,6 @@ static struct drm_display_mode edid_est_modes[] = { DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ }; -#define EDID_EST_TIMINGS 16 -#define EDID_STD_TIMINGS 8 -#define EDID_DETAILED_TIMINGS 4 - /** * add_established_modes - get est. modes from EDID and add them * @edid: EDID block to scan @@ -839,19 +1131,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e return modes; } -/** - * stanard_timing_level - get std. timing level(CVT/GTF/DMT) - * @edid: EDID block to scan - */ -static int standard_timing_level(struct edid *edid) -{ - if (edid->revision >= 2) { - if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) - return LEVEL_CVT; - return LEVEL_GTF; - } - return LEVEL_DMT; -} /** * add_standard_modes - get std. 
modes from EDID and add them @@ -862,22 +1141,14 @@ static int standard_timing_level(struct edid *edid) */ static int add_standard_modes(struct drm_connector *connector, struct edid *edid) { - struct drm_device *dev = connector->dev; int i, modes = 0; - int timing_level; - - timing_level = standard_timing_level(edid); for (i = 0; i < EDID_STD_TIMINGS; i++) { - struct std_timing *t = &edid->standard_timings[i]; struct drm_display_mode *newmode; - /* If std timings bytes are 1, 1 it's empty */ - if (t->hsize == 1 && t->vfreq_aspect == 1) - continue; - - newmode = drm_mode_std(dev, &edid->standard_timings[i], - edid->revision, timing_level); + newmode = drm_mode_std(connector, edid, + &edid->standard_timings[i], + edid->revision); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; @@ -887,36 +1158,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid return modes; } -/* - * XXX fix this for: - * - GTF secondary curve formula - * - EDID 1.4 range offsets - * - CVT extended bits - */ static bool -mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) +mode_is_rb(struct drm_display_mode *mode) { - struct detailed_data_monitor_range *range; - int hsync, vrefresh; - - range = &timing->data.other_data.data.range; + return (mode->htotal - mode->hdisplay == 160) && + (mode->hsync_end - mode->hdisplay == 80) && + (mode->hsync_end - mode->hsync_start == 32) && + (mode->vsync_start - mode->vdisplay == 3); +} +static bool +mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) +{ + int hsync, hmin, hmax; + + hmin = t[7]; + if (edid->revision >= 4) + hmin += ((t[4] & 0x04) ? 255 : 0); + hmax = t[8]; + if (edid->revision >= 4) + hmax += ((t[4] & 0x08) ? 255 : 0); hsync = drm_mode_hsync(mode); - vrefresh = drm_mode_vrefresh(mode); - if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) + return (hsync <= hmax && hsync >= hmin); +} + +static bool +mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) +{ + int vsync, vmin, vmax; + + vmin = t[5]; + if (edid->revision >= 4) + vmin += ((t[4] & 0x01) ? 255 : 0); + vmax = t[6]; + if (edid->revision >= 4) + vmax += ((t[4] & 0x02) ? 
255 : 0); + vsync = drm_mode_vrefresh(mode); + + return (vsync <= vmax && vsync >= vmin); +} + +static u32 +range_pixel_clock(struct edid *edid, u8 *t) +{ + /* unspecified */ + if (t[9] == 0 || t[9] == 255) + return 0; + + /* 1.4 with CVT support gives us real precision, yay */ + if (edid->revision >= 4 && t[10] == 0x04) + return (t[9] * 10000) - ((t[12] >> 2) * 250); + + /* 1.3 is pathetic, so fuzz up a bit */ + return t[9] * 10000 + 5001; +} + +static bool +mode_in_range(struct drm_display_mode *mode, struct edid *edid, + struct detailed_timing *timing) +{ + u32 max_clock; + u8 *t = (u8 *)timing; + + if (!mode_in_hsync_range(mode, edid, t)) return false; - if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) + if (!mode_in_vsync_range(mode, edid, t)) return false; - if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { - /* be forgiving since it's in units of 10MHz */ - int max_clock = range->pixel_clock_mhz * 10 + 9; - max_clock *= 1000; + if ((max_clock = range_pixel_clock(edid, t))) if (mode->clock > max_clock) return false; - } + + /* 1.4 max horizontal check */ + if (edid->revision >= 4 && t[10] == 0x04) + if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) + return false; + + if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) + return false; return true; } @@ -925,15 +1246,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will * need to account for them. */ -static int drm_gtf_modes_for_range(struct drm_connector *connector, - struct detailed_timing *timing) +static int +drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, + struct detailed_timing *timing) { int i, modes = 0; struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; for (i = 0; i < drm_num_dmt_modes; i++) { - if (mode_in_range(drm_dmt_modes + i, timing)) { + if (mode_in_range(drm_dmt_modes + i, edid, timing)) { newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); if (newmode) { drm_mode_probed_add(connector, newmode); @@ -994,13 +1316,99 @@ static int drm_cvt_modes(struct drm_connector *connector, return modes; } +static const struct { + short w; + short h; + short r; + short rb; +} est3_modes[] = { + /* byte 6 */ + { 640, 350, 85, 0 }, + { 640, 400, 85, 0 }, + { 720, 400, 85, 0 }, + { 640, 480, 85, 0 }, + { 848, 480, 60, 0 }, + { 800, 600, 85, 0 }, + { 1024, 768, 85, 0 }, + { 1152, 864, 75, 0 }, + /* byte 7 */ + { 1280, 768, 60, 1 }, + { 1280, 768, 60, 0 }, + { 1280, 768, 75, 0 }, + { 1280, 768, 85, 0 }, + { 1280, 960, 60, 0 }, + { 1280, 960, 85, 0 }, + { 1280, 1024, 60, 0 }, + { 1280, 1024, 85, 0 }, + /* byte 8 */ + { 1360, 768, 60, 0 }, + { 1440, 900, 60, 1 }, + { 1440, 900, 60, 0 }, + { 1440, 900, 75, 0 }, + { 1440, 900, 85, 0 }, + { 1400, 1050, 60, 1 }, + { 1400, 1050, 60, 0 }, + { 1400, 1050, 75, 0 }, + /* byte 9 */ + { 1400, 1050, 85, 0 }, + { 1680, 1050, 60, 1 }, + { 1680, 1050, 60, 0 }, + { 1680, 1050, 75, 0 }, + { 1680, 1050, 85, 0 }, + { 1600, 1200, 60, 0 }, + { 1600, 1200, 65, 0 }, + { 1600, 1200, 70, 0 }, + /* byte 10 */ + { 1600, 1200, 75, 0 }, + { 1600, 1200, 85, 0 }, + { 1792, 1344, 60, 0 }, + { 1792, 1344, 85, 0 }, + { 1856, 1392, 60, 0 }, + { 1856, 1392, 75, 0 }, + { 1920, 1200, 60, 1 }, + { 1920, 1200, 60, 0 }, + /* byte 11 */ + { 1920, 1200, 75, 0 }, + { 1920, 1200, 85, 0 }, + { 1920, 1440, 60, 0 }, + { 1920, 1440, 75, 0 }, +}; + +static int +drm_est3_modes(struct drm_connector *connector, struct detailed_timing 
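mode_in_range() now honors the EDID 1.4 refinements: bits in byte 4 of the descriptor add +255 to the H/V rate limits, and a CVT-flavored descriptor (byte 10 == 0x04) tightens the max pixel clock in 0.25 MHz steps below the coarse 10 MHz granularity of byte 9. A standalone decode of those fields (t = raw 18-byte descriptor):

        #include <stdint.h>

        /* Max pixel clock in kHz; 0 means unspecified. */
        static uint32_t range_max_clock_khz(const uint8_t *t, int edid_rev)
        {
                if (t[9] == 0 || t[9] == 255)
                        return 0;
                if (edid_rev >= 4 && t[10] == 0x04)             /* CVT: real precision */
                        return t[9] * 10000 - ((t[12] >> 2) * 250);
                return t[9] * 10000 + 5001;                     /* 1.3: fuzz upward */
        }

        /* Vertical refresh limits in Hz, with the 1.4 +255 offset bits. */
        static void range_vrefresh(const uint8_t *t, int edid_rev, int *vmin, int *vmax)
        {
                *vmin = t[5] + ((edid_rev >= 4 && (t[4] & 0x01)) ? 255 : 0);
                *vmax = t[6] + ((edid_rev >= 4 && (t[4] & 0x02)) ? 255 : 0);
        }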
*timing) +{ + int i, j, m, modes = 0; + struct drm_display_mode *mode; + u8 *est = ((u8 *)timing) + 5; + + for (i = 0; i < 6; i++) { + for (j = 7; j > 0; j--) { + m = (i * 8) + (7 - j); + if (m >= ARRAY_SIZE(est3_modes)) + break; + if (est[i] & (1 << j)) { + mode = drm_mode_find_dmt(connector->dev, + est3_modes[m].w, + est3_modes[m].h, + est3_modes[m].r + /*, est3_modes[m].rb */); + if (mode) { + drm_mode_probed_add(connector, mode); + modes++; + } + } + } + } + + return modes; +} + static int add_detailed_modes(struct drm_connector *connector, struct detailed_timing *timing, struct edid *edid, u32 quirks, int preferred) { int i, modes = 0; struct detailed_non_pixel *data = &timing->data.other_data; - int timing_level = standard_timing_level(edid); int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; @@ -1021,7 +1429,8 @@ static int add_detailed_modes(struct drm_connector *connector, switch (data->type) { case EDID_DETAIL_MONITOR_RANGE: if (gtf) - modes += drm_gtf_modes_for_range(connector, timing); + modes += drm_gtf_modes_for_range(connector, edid, + timing); break; case EDID_DETAIL_STD_MODES: /* Six modes per detailed section */ @@ -1030,8 +1439,8 @@ static int add_detailed_modes(struct drm_connector *connector, struct drm_display_mode *newmode; std = &data->data.timings[i]; - newmode = drm_mode_std(dev, std, edid->revision, - timing_level); + newmode = drm_mode_std(connector, edid, std, + edid->revision); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; @@ -1041,6 +1450,9 @@ static int add_detailed_modes(struct drm_connector *connector, case EDID_DETAIL_CVT_3BYTE: modes += drm_cvt_modes(connector, timing); break; + case EDID_DETAIL_EST_TIMINGS: + modes += drm_est3_modes(connector, timing); + break; default: break; } @@ -1064,7 +1476,10 @@ static int add_detailed_info(struct drm_connector *connector, for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { struct detailed_timing *timing = &edid->detailed_timings[i]; - int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); + int preferred = (i == 0); + + if (preferred && edid->version == 1 && edid->revision < 4) + preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); /* In 1.0, only timings are allowed */ if (!timing->pixel_clock && edid->version == 1 && @@ -1094,39 +1509,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector, int i, modes = 0; char *edid_ext = NULL; struct detailed_timing *timing; - int edid_ext_num; int start_offset, end_offset; - int timing_level; - if (edid->version == 1 && edid->revision < 3) { - /* If the EDID version is less than 1.3, there is no - * extension EDID. - */ + if (edid->version == 1 && edid->revision < 3) return 0; - } - if (!edid->extensions) { - /* if there is no extension EDID, it is unnecessary to - * parse the E-EDID to get detailed info - */ + if (!edid->extensions) return 0; - } - - /* Chose real EDID extension number */ - edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? 
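The established timings III descriptor parsed above is six bitmap bytes starting five bytes into the descriptor (est = timing + 5 in the hunk), walked MSB-first, each set bit selecting one entry of the est3_modes table. A bit-walk sketch mirroring that loop; note it leaves bit 0 of each byte alone, exactly as the kernel loop does:

        #include <stdint.h>
        #include <stdio.h>

        static void walk_est3(const uint8_t *desc)      /* desc = 18-byte descriptor */
        {
                const uint8_t *est = desc + 5;          /* bitmap bytes */

                for (int i = 0; i < 6; i++)
                        for (int j = 7; j > 0; j--)
                                if (est[i] & (1 << j))
                                        printf("est3 mode index %d set\n",
                                               (i * 8) + (7 - j));
        }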
- DRM_MAX_EDID_EXT_NUM : edid->extensions; /* Find CEA extension */ - for (i = 0; i < edid_ext_num; i++) { + for (i = 0; i < edid->extensions; i++) { edid_ext = (char *)edid + EDID_LENGTH * (i + 1); - /* This block is CEA extension */ if (edid_ext[0] == 0x02) break; } - if (i == edid_ext_num) { - /* if there is no additional timing EDID block, return */ + if (i == edid->extensions) return 0; - } /* Get the start offset of detailed timing block */ start_offset = edid_ext[2]; @@ -1138,7 +1536,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, return 0; } - timing_level = standard_timing_level(edid); end_offset = EDID_LENGTH; end_offset -= sizeof(struct detailed_timing); for (i = start_offset; i < end_offset; @@ -1150,123 +1547,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, return modes; } -#define DDC_ADDR 0x50 -/** - * Get EDID information via I2C. - * - * \param adapter : i2c device adaptor - * \param buf : EDID data buffer to be filled - * \param len : EDID data buffer length - * \return 0 on success or -1 on failure. - * - * Try to fetch EDID information by calling i2c driver function. - */ -int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, - unsigned char *buf, int len) -{ - unsigned char start = 0x0; - struct i2c_msg msgs[] = { - { - .addr = DDC_ADDR, - .flags = 0, - .len = 1, - .buf = &start, - }, { - .addr = DDC_ADDR, - .flags = I2C_M_RD, - .len = len, - .buf = buf, - } - }; - - if (i2c_transfer(adapter, msgs, 2) == 2) - return 0; - - return -1; -} -EXPORT_SYMBOL(drm_do_probe_ddc_edid); - -static int drm_ddc_read_edid(struct drm_connector *connector, - struct i2c_adapter *adapter, - char *buf, int len) -{ - int i; - - for (i = 0; i < 4; i++) { - if (drm_do_probe_ddc_edid(adapter, buf, len)) - return -1; - if (drm_edid_is_valid((struct edid *)buf)) - return 0; - } - - /* repeated checksum failures; warn, but carry on */ - dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", - drm_get_connector_name(connector)); - return -1; -} - -/** - * drm_get_edid - get EDID data, if available - * @connector: connector we're probing - * @adapter: i2c adapter to use for DDC - * - * Poke the given connector's i2c channel to grab EDID data if possible. - * - * Return edid data or NULL if we couldn't find any. 
- */ -struct edid *drm_get_edid(struct drm_connector *connector, - struct i2c_adapter *adapter) -{ - int ret; - struct edid *edid; - - edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), - GFP_KERNEL); - if (edid == NULL) { - dev_warn(&connector->dev->pdev->dev, - "Failed to allocate EDID\n"); - goto end; - } - - /* Read first EDID block */ - ret = drm_ddc_read_edid(connector, adapter, - (unsigned char *)edid, EDID_LENGTH); - if (ret != 0) - goto clean_up; - - /* There are EDID extensions to be read */ - if (edid->extensions != 0) { - int edid_ext_num = edid->extensions; - - if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) { - dev_warn(&connector->dev->pdev->dev, - "The number of extension(%d) is " - "over max (%d), actually read number (%d)\n", - edid_ext_num, DRM_MAX_EDID_EXT_NUM, - DRM_MAX_EDID_EXT_NUM); - /* Reset EDID extension number to be read */ - edid_ext_num = DRM_MAX_EDID_EXT_NUM; - } - /* Read EDID including extensions too */ - ret = drm_ddc_read_edid(connector, adapter, (char *)edid, - EDID_LENGTH * (edid_ext_num + 1)); - if (ret != 0) - goto clean_up; - - } - - connector->display_info.raw_edid = (char *)edid; - goto end; - -clean_up: - kfree(edid); - edid = NULL; -end: - return edid; - -} -EXPORT_SYMBOL(drm_get_edid); - #define HDMI_IDENTIFIER 0x000C03 #define VENDOR_BLOCK 0x03 /** @@ -1279,7 +1559,7 @@ EXPORT_SYMBOL(drm_get_edid); bool drm_detect_hdmi_monitor(struct edid *edid) { char *edid_ext = NULL; - int i, hdmi_id, edid_ext_num; + int i, hdmi_id; int start_offset, end_offset; bool is_hdmi = false; @@ -1287,19 +1567,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid) if (edid == NULL || edid->extensions == 0) goto end; - /* Chose real EDID extension number */ - edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? - DRM_MAX_EDID_EXT_NUM : edid->extensions; - /* Find CEA extension */ - for (i = 0; i < edid_ext_num; i++) { + for (i = 0; i < edid->extensions; i++) { edid_ext = (char *)edid + EDID_LENGTH * (i + 1); /* This block is CEA extension */ if (edid_ext[0] == 0x02) break; } - if (i == edid_ext_num) + if (i == edid->extensions) goto end; /* Data block offset in CEA extension block */ @@ -1354,10 +1630,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) quirks = edid_get_quirks(edid); - num_modes += add_established_modes(connector, edid); - num_modes += add_standard_modes(connector, edid); + /* + * EDID spec says modes should be preferred in this order: + * - preferred detailed mode + * - other detailed modes from base block + * - detailed modes from extension blocks + * - CVT 3-byte code modes + * - standard timing codes + * - established timing codes + * - modes inferred from GTF or CVT range information + * + * We don't quite implement this yet, but we're close. + * + * XXX order for additional mode types in extension blocks? 
+ */ num_modes += add_detailed_info(connector, edid, quirks); num_modes += add_detailed_info_eedid(connector, edid, quirks); + num_modes += add_standard_modes(connector, edid); + num_modes += add_established_modes(connector, edid); if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) edid_fixup_preferred(connector, quirks); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 50549703584f..719662034bbf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -29,6 +29,7 @@ */ #include <linux/kernel.h> #include <linux/sysrq.h> +#include <linux/slab.h> #include <linux/fb.h> #include "drmP.h" #include "drm_crtc.h" @@ -41,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights"); static LIST_HEAD(kernel_fb_helper_list); -int drm_fb_helper_add_connector(struct drm_connector *connector) +/* simple single crtc case helper function */ +int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { - connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); - if (!connector->fb_helper_private) - return -ENOMEM; + struct drm_device *dev = fb_helper->dev; + struct drm_connector *connector; + int i; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct drm_fb_helper_connector *fb_helper_connector; + + fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); + if (!fb_helper_connector) + goto fail; + fb_helper_connector->connector = connector; + fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; + } return 0; +fail: + for (i = 0; i < fb_helper->connector_count; i++) { + kfree(fb_helper->connector_info[i]); + fb_helper->connector_info[i] = NULL; + } + fb_helper->connector_count = 0; + return -ENOMEM; } -EXPORT_SYMBOL(drm_fb_helper_add_connector); +EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); /** * drm_fb_helper_connector_parse_command_line - parse command line for connector @@ -64,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector); * * enable/enable Digital/disable bit at the end */ -static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector, +static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn, const char *mode_option) { const char *name; @@ -74,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; int i; enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; - struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; struct drm_fb_helper_cmdline_mode *cmdline_mode; + struct drm_connector *connector = fb_helper_conn->connector; - if (!fb_help_conn) + if (!fb_helper_conn) return false; - cmdline_mode = &fb_help_conn->cmdline_mode; + cmdline_mode = &fb_helper_conn->cmdline_mode; if (!mode_option) mode_option = fb_mode_option; @@ -127,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con cvt = 1; break; case 'R': - if (!cvt) + if (cvt) rb = 1; break; case 'm': @@ -203,18 +222,21 @@ done: return true; } -int drm_fb_helper_parse_command_line(struct drm_device *dev) +static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) { - struct drm_connector *connector; + struct drm_fb_helper_connector *fb_helper_conn; + int i; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + for (i = 0; i < 
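The one-character fix in the command-line parser below is easy to miss: 'R' (reduced blanking) is only meaningful for CVT-generated modes, so the flag must be honored when cvt is set, not when it is clear. The mode string being parsed is the fb layer's modedb format, roughly:

        <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m]

so, for example, video=DVI-I-1:1280x1024MR@60 asks for a 1280x1024 reduced-blanking CVT mode at 60 Hz on that connector.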
fb_helper->connector_count; i++) { char *option = NULL; + fb_helper_conn = fb_helper->connector_info[i]; + /* do something on return - turn off connector maybe */ - if (fb_get_options(drm_get_connector_name(connector), &option)) + if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option)) continue; - drm_fb_helper_connector_parse_command_line(connector, option); + drm_fb_helper_connector_parse_command_line(fb_helper_conn, option); } return 0; } @@ -242,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void) int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, void *panic_str) { - DRM_ERROR("panic occurred, switching back to text console\n"); + printk(KERN_ERR "panic occurred, switching back to text console\n"); return drm_fb_helper_force_kernel_mode(); return 0; } @@ -283,6 +305,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { .help_msg = "force-fb(V)", .action_msg = "Restore framebuffer console", }; +#else +static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; #endif static void drm_fb_helper_on(struct fb_info *info) @@ -290,40 +314,44 @@ static void drm_fb_helper_on(struct fb_info *info) struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; + struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_connector *connector; struct drm_encoder *encoder; - int i; + int i, j; /* * For each CRTC in this fb, turn the crtc on then, * find all associated encoders and turn them on. */ + mutex_lock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = - crtc->helper_private; + crtc = fb_helper->crtc_info[i].mode_set.crtc; + crtc_funcs = crtc->helper_private; - /* Only mess with CRTCs in this fb */ - if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || - !crtc->enabled) - continue; + if (!crtc->enabled) + continue; - mutex_lock(&dev->mode_config.mutex); - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); - mutex_unlock(&dev->mode_config.mutex); + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); - /* Found a CRTC on this fb, now find encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - struct drm_encoder_helper_funcs *encoder_funcs; + /* Walk the connectors & encoders on this fb turning them on */ + for (j = 0; j < fb_helper->connector_count; j++) { + connector = fb_helper->connector_info[j]->connector; + connector->dpms = DRM_MODE_DPMS_ON; + drm_connector_property_set_value(connector, + dev->mode_config.dpms_property, + DRM_MODE_DPMS_ON); + } + /* Found a CRTC on this fb, now find encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + struct drm_encoder_helper_funcs *encoder_funcs; - encoder_funcs = encoder->helper_private; - mutex_lock(&dev->mode_config.mutex); - encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); - mutex_unlock(&dev->mode_config.mutex); - } + encoder_funcs = encoder->helper_private; + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } } } + mutex_unlock(&dev->mode_config.mutex); } static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) @@ -331,39 +359,43 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; + struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_connector *connector; struct 
drm_encoder *encoder; - int i; + int i, j; /* * For each CRTC in this fb, find all associated encoders * and turn them off, then turn off the CRTC. */ + mutex_lock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = - crtc->helper_private; + crtc = fb_helper->crtc_info[i].mode_set.crtc; + crtc_funcs = crtc->helper_private; - /* Only mess with CRTCs in this fb */ - if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || - !crtc->enabled) - continue; + if (!crtc->enabled) + continue; - /* Found a CRTC on this fb, now find encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - struct drm_encoder_helper_funcs *encoder_funcs; + /* Walk the connectors on this fb and mark them off */ + for (j = 0; j < fb_helper->connector_count; j++) { + connector = fb_helper->connector_info[j]->connector; + connector->dpms = dpms_mode; + drm_connector_property_set_value(connector, + dev->mode_config.dpms_property, + dpms_mode); + } + /* Found a CRTC on this fb, now find encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + struct drm_encoder_helper_funcs *encoder_funcs; - encoder_funcs = encoder->helper_private; - mutex_lock(&dev->mode_config.mutex); - encoder_funcs->dpms(encoder, dpms_mode); - mutex_unlock(&dev->mode_config.mutex); - } + encoder_funcs = encoder->helper_private; + encoder_funcs->dpms(encoder, dpms_mode); } - mutex_lock(&dev->mode_config.mutex); - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); - mutex_unlock(&dev->mode_config.mutex); } + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); } + mutex_unlock(&dev->mode_config.mutex); } int drm_fb_helper_blank(int blank, struct fb_info *info) @@ -398,50 +430,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) { int i; + for (i = 0; i < helper->connector_count; i++) + kfree(helper->connector_info[i]); + kfree(helper->connector_info); for (i = 0; i < helper->crtc_count; i++) kfree(helper->crtc_info[i].mode_set.connectors); kfree(helper->crtc_info); } -int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count) +int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + int crtc_count, int max_conn_count) { - struct drm_device *dev = helper->dev; struct drm_crtc *crtc; int ret = 0; int i; - helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); - if (!helper->crtc_info) + fb_helper->dev = dev; + + INIT_LIST_HEAD(&fb_helper->kernel_fb_list); + + fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); + if (!fb_helper->crtc_info) return -ENOMEM; - helper->crtc_count = crtc_count; + fb_helper->crtc_count = crtc_count; + fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL); + if (!fb_helper->connector_info) { + kfree(fb_helper->crtc_info); + return -ENOMEM; + } + fb_helper->connector_count = 0; for (i = 0; i < crtc_count; i++) { - helper->crtc_info[i].mode_set.connectors = + fb_helper->crtc_info[i].mode_set.connectors = kcalloc(max_conn_count, sizeof(struct drm_connector *), GFP_KERNEL); - if (!helper->crtc_info[i].mode_set.connectors) { + if (!fb_helper->crtc_info[i].mode_set.connectors) { ret = -ENOMEM; goto out_free; } - helper->crtc_info[i].mode_set.num_connectors = 0; + 
fb_helper->crtc_info[i].mode_set.num_connectors = 0; } i = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - helper->crtc_info[i].crtc_id = crtc->base.id; - helper->crtc_info[i].mode_set.crtc = crtc; + fb_helper->crtc_info[i].crtc_id = crtc->base.id; + fb_helper->crtc_info[i].mode_set.crtc = crtc; i++; } - helper->conn_limit = max_conn_count; + fb_helper->conn_limit = max_conn_count; return 0; out_free: - drm_fb_helper_crtc_free(helper); + drm_fb_helper_crtc_free(fb_helper); return -ENOMEM; } -EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); +EXPORT_SYMBOL(drm_fb_helper_init); + +void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) +{ + if (!list_empty(&fb_helper->kernel_fb_list)) { + list_del(&fb_helper->kernel_fb_list); + if (list_empty(&kernel_fb_helper_list)) { + printk(KERN_INFO "drm: unregistered panic notifier\n"); + atomic_notifier_chain_unregister(&panic_notifier_list, + &paniced); + unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); + } + } + + drm_fb_helper_crtc_free(fb_helper); + +} +EXPORT_SYMBOL(drm_fb_helper_fini); static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) @@ -505,20 +568,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; + struct drm_crtc_helper_funcs *crtc_funcs; u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; int i, rc = 0; int start; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - for (i = 0; i < fb_helper->crtc_count; i++) { - if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) - break; - } - if (i == fb_helper->crtc_count) - continue; + for (i = 0; i < fb_helper->crtc_count; i++) { + crtc = fb_helper->crtc_info[i].mode_set.crtc; + crtc_funcs = crtc->helper_private; red = cmap->red; green = cmap->green; @@ -546,41 +604,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_setcmap); -int drm_fb_helper_setcolreg(unsigned regno, - unsigned red, - unsigned green, - unsigned blue, - unsigned transp, - struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; - int i; - int ret; - - if (regno > 255) - return 1; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - for (i = 0; i < fb_helper->crtc_count; i++) { - if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) - break; - } - if (i == fb_helper->crtc_count) - continue; - - ret = setcolreg(crtc, red, green, blue, regno, info); - if (ret) - return ret; - - crtc_funcs->load_lut(crtc); - } - return 0; -} -EXPORT_SYMBOL(drm_fb_helper_setcolreg); - int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { @@ -684,23 +707,21 @@ int drm_fb_helper_set_par(struct fb_info *info) return -EINVAL; } - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - - for (i = 0; i < fb_helper->crtc_count; i++) { - if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) - break; - } - if (i == fb_helper->crtc_count) - continue; - - if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) { - mutex_lock(&dev->mode_config.mutex); - ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); + mutex_lock(&dev->mode_config.mutex); + for (i = 0; 
i < fb_helper->crtc_count; i++) { + crtc = fb_helper->crtc_info[i].mode_set.crtc; + ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); + if (ret) { mutex_unlock(&dev->mode_config.mutex); - if (ret) - return ret; + return ret; } } + mutex_unlock(&dev->mode_config.mutex); + + if (fb_helper->delayed_hotplug) { + fb_helper->delayed_hotplug = false; + drm_fb_helper_hotplug_event(fb_helper); + } return 0; } EXPORT_SYMBOL(drm_fb_helper_set_par); @@ -715,14 +736,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, int ret = 0; int i; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - for (i = 0; i < fb_helper->crtc_count; i++) { - if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) - break; - } - - if (i == fb_helper->crtc_count) - continue; + mutex_lock(&dev->mode_config.mutex); + for (i = 0; i < fb_helper->crtc_count; i++) { + crtc = fb_helper->crtc_info[i].mode_set.crtc; modeset = &fb_helper->crtc_info[i].mode_set; @@ -730,209 +746,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, modeset->y = var->yoffset; if (modeset->num_connectors) { - mutex_lock(&dev->mode_config.mutex); ret = crtc->funcs->set_config(modeset); - mutex_unlock(&dev->mode_config.mutex); if (!ret) { info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; } } } + mutex_unlock(&dev->mode_config.mutex); return ret; } EXPORT_SYMBOL(drm_fb_helper_pan_display); -int drm_fb_helper_single_fb_probe(struct drm_device *dev, - int preferred_bpp, - int (*fb_create)(struct drm_device *dev, - uint32_t fb_width, - uint32_t fb_height, - uint32_t surface_width, - uint32_t surface_height, - uint32_t surface_depth, - uint32_t surface_bpp, - struct drm_framebuffer **fb_ptr)) +int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, + int preferred_bpp) { - struct drm_crtc *crtc; - struct drm_connector *connector; - unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; - unsigned int surface_width = 0, surface_height = 0; int new_fb = 0; int crtc_count = 0; - int ret, i, conn_count = 0; + int i; struct fb_info *info; - struct drm_framebuffer *fb; - struct drm_mode_set *modeset = NULL; - struct drm_fb_helper *fb_helper; - uint32_t surface_depth = 24, surface_bpp = 32; + struct drm_fb_helper_surface_size sizes; + int gamma_size = 0; + + memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); + sizes.surface_depth = 24; + sizes.surface_bpp = 32; + sizes.fb_width = (unsigned)-1; + sizes.fb_height = (unsigned)-1; /* if driver picks 8 or 16 by default use that for both depth/bpp */ - if (preferred_bpp != surface_bpp) { - surface_depth = surface_bpp = preferred_bpp; + if (preferred_bpp != sizes.surface_bpp) { + sizes.surface_depth = sizes.surface_bpp = preferred_bpp; } /* first up get a count of crtcs now in use and new min/maxes width/heights */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; - + for (i = 0; i < fb_helper->connector_count; i++) { + struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; struct drm_fb_helper_cmdline_mode *cmdline_mode; - if (!fb_help_conn) - continue; - - cmdline_mode = &fb_help_conn->cmdline_mode; + cmdline_mode = &fb_helper_conn->cmdline_mode; if (cmdline_mode->bpp_specified) { switch (cmdline_mode->bpp) { case 8: - surface_depth = surface_bpp = 8; + sizes.surface_depth = sizes.surface_bpp = 8; break; case 15: - surface_depth = 15; - surface_bpp = 16; + sizes.surface_depth = 15; + 
sizes.surface_bpp = 16; break; case 16: - surface_depth = surface_bpp = 16; + sizes.surface_depth = sizes.surface_bpp = 16; break; case 24: - surface_depth = surface_bpp = 24; + sizes.surface_depth = sizes.surface_bpp = 24; break; case 32: - surface_depth = 24; - surface_bpp = 32; + sizes.surface_depth = 24; + sizes.surface_bpp = 32; break; } break; } } - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (drm_helper_crtc_in_use(crtc)) { - if (crtc->desired_mode) { - if (crtc->desired_mode->hdisplay < fb_width) - fb_width = crtc->desired_mode->hdisplay; - - if (crtc->desired_mode->vdisplay < fb_height) - fb_height = crtc->desired_mode->vdisplay; - - if (crtc->desired_mode->hdisplay > surface_width) - surface_width = crtc->desired_mode->hdisplay; - - if (crtc->desired_mode->vdisplay > surface_height) - surface_height = crtc->desired_mode->vdisplay; - } + crtc_count = 0; + for (i = 0; i < fb_helper->crtc_count; i++) { + struct drm_display_mode *desired_mode; + desired_mode = fb_helper->crtc_info[i].desired_mode; + + if (desired_mode) { + if (gamma_size == 0) + gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; + if (desired_mode->hdisplay < sizes.fb_width) + sizes.fb_width = desired_mode->hdisplay; + if (desired_mode->vdisplay < sizes.fb_height) + sizes.fb_height = desired_mode->vdisplay; + if (desired_mode->hdisplay > sizes.surface_width) + sizes.surface_width = desired_mode->hdisplay; + if (desired_mode->vdisplay > sizes.surface_height) + sizes.surface_height = desired_mode->vdisplay; crtc_count++; } } - if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { + if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { /* hmm everyone went away - assume VGA cable just fell out and will come back later. */ - return 0; - } - - /* do we have an fb already? */ - if (list_empty(&dev->mode_config.fb_kernel_list)) { - ret = (*fb_create)(dev, fb_width, fb_height, surface_width, - surface_height, surface_depth, surface_bpp, - &fb); - if (ret) - return -EINVAL; - new_fb = 1; - } else { - fb = list_first_entry(&dev->mode_config.fb_kernel_list, - struct drm_framebuffer, filp_head); - - /* if someone hotplugs something bigger than we have already allocated, we are pwned. - As really we can't resize an fbdev that is in the wild currently due to fbdev - not really being designed for the lower layers moving stuff around under it. - - so in the grand style of things - punt. 
*/ - if ((fb->width < surface_width) || - (fb->height < surface_height)) { - DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); - return -EINVAL; - } + DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); + sizes.fb_width = sizes.surface_width = 1024; + sizes.fb_height = sizes.surface_height = 768; } - info = fb->fbdev; - fb_helper = info->par; + /* push down into drivers */ + new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); + if (new_fb < 0) + return new_fb; - crtc_count = 0; - /* okay we need to setup new connector sets in the crtcs */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - modeset = &fb_helper->crtc_info[crtc_count].mode_set; - modeset->fb = fb; - conn_count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder) - if (connector->encoder->crtc == modeset->crtc) { - modeset->connectors[conn_count] = connector; - conn_count++; - if (conn_count > fb_helper->conn_limit) - BUG(); - } - } + info = fb_helper->fbdev; - for (i = conn_count; i < fb_helper->conn_limit; i++) - modeset->connectors[i] = NULL; - - modeset->crtc = crtc; - crtc_count++; - - modeset->num_connectors = conn_count; - if (modeset->crtc->desired_mode) { - if (modeset->mode) - drm_mode_destroy(dev, modeset->mode); - modeset->mode = drm_mode_duplicate(dev, - modeset->crtc->desired_mode); - } + /* set the fb pointer */ + for (i = 0; i < fb_helper->crtc_count; i++) { + fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; } - fb_helper->crtc_count = crtc_count; - fb_helper->fb = fb; if (new_fb) { info->var.pixclock = 0; - ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0); - if (ret) - return ret; if (register_framebuffer(info) < 0) { - fb_dealloc_cmap(&info->cmap); return -EINVAL; } + + printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + info->fix.id); + } else { drm_fb_helper_set_par(info); } - printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, - info->fix.id); /* Switch back to kernel console on panic */ /* multi card linked list maybe */ if (list_empty(&kernel_fb_helper_list)) { - printk(KERN_INFO "registered panic notifier\n"); + printk(KERN_INFO "drm: registered panic notifier\n"); atomic_notifier_chain_register(&panic_notifier_list, &paniced); register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); } - list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); + if (new_fb) + list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); + return 0; } EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); -void drm_fb_helper_free(struct drm_fb_helper *helper) -{ - list_del(&helper->kernel_fb_list); - if (list_empty(&kernel_fb_helper_list)) { - printk(KERN_INFO "unregistered panic notifier\n"); - atomic_notifier_chain_unregister(&panic_notifier_list, - &paniced); - unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - } - drm_fb_helper_crtc_free(helper); - fb_dealloc_cmap(&helper->fb->fbdev->cmap); -} -EXPORT_SYMBOL(drm_fb_helper_free); - void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, uint32_t depth) { @@ -951,10 +896,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, } EXPORT_SYMBOL(drm_fb_helper_fill_fix); -void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, +void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) { - info->pseudo_palette = fb->pseudo_palette; + struct drm_framebuffer *fb = fb_helper->fb; + info->pseudo_palette = 
fb_helper->pseudo_palette; info->var.xres_virtual = fb->width; info->var.yres_virtual = fb->height; info->var.bits_per_pixel = fb->bits_per_pixel; @@ -1022,3 +968,464 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, info->var.yres = fb_height; } EXPORT_SYMBOL(drm_fb_helper_fill_var); + +static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, + uint32_t maxX, + uint32_t maxY) +{ + struct drm_connector *connector; + int count = 0; + int i; + + for (i = 0; i < fb_helper->connector_count; i++) { + connector = fb_helper->connector_info[i]->connector; + count += connector->funcs->fill_modes(connector, maxX, maxY); + } + + return count; +} + +static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) +{ + struct drm_display_mode *mode; + + list_for_each_entry(mode, &fb_connector->connector->modes, head) { + if (drm_mode_width(mode) > width || + drm_mode_height(mode) > height) + continue; + if (mode->type & DRM_MODE_TYPE_PREFERRED) + return mode; + } + return NULL; +} + +static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) +{ + struct drm_fb_helper_cmdline_mode *cmdline_mode; + cmdline_mode = &fb_connector->cmdline_mode; + return cmdline_mode->specified; +} + +static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, + int width, int height) +{ + struct drm_fb_helper_cmdline_mode *cmdline_mode; + struct drm_display_mode *mode = NULL; + + cmdline_mode = &fb_helper_conn->cmdline_mode; + if (cmdline_mode->specified == false) + return mode; + + /* attempt to find a matching mode in the list of modes + * we have gotten so far, if not add a CVT mode that conforms + */ + if (cmdline_mode->rb || cmdline_mode->margins) + goto create_mode; + + list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { + /* check width/height */ + if (mode->hdisplay != cmdline_mode->xres || + mode->vdisplay != cmdline_mode->yres) + continue; + + if (cmdline_mode->refresh_specified) { + if (mode->vrefresh != cmdline_mode->refresh) + continue; + } + + if (cmdline_mode->interlace) { + if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) + continue; + } + return mode; + } + +create_mode: + if (cmdline_mode->cvt) + mode = drm_cvt_mode(fb_helper_conn->connector->dev, + cmdline_mode->xres, cmdline_mode->yres, + cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, + cmdline_mode->rb, cmdline_mode->interlace, + cmdline_mode->margins); + else + mode = drm_gtf_mode(fb_helper_conn->connector->dev, + cmdline_mode->xres, cmdline_mode->yres, + cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, + cmdline_mode->interlace, + cmdline_mode->margins); + drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); + list_add(&mode->head, &fb_helper_conn->connector->modes); + return mode; +} + +static bool drm_connector_enabled(struct drm_connector *connector, bool strict) +{ + bool enable; + + if (strict) { + enable = connector->status == connector_status_connected; + } else { + enable = connector->status != connector_status_disconnected; + } + return enable; +} + +static void drm_enable_connectors(struct drm_fb_helper *fb_helper, + bool *enabled) +{ + bool any_enabled = false; + struct drm_connector *connector; + int i = 0; + + for (i = 0; i < fb_helper->connector_count; i++) { + connector = fb_helper->connector_info[i]->connector; + enabled[i] = drm_connector_enabled(connector, true); + DRM_DEBUG_KMS("connector %d enabled? 
%s\n", connector->base.id, + enabled[i] ? "yes" : "no"); + any_enabled |= enabled[i]; + } + + if (any_enabled) + return; + + for (i = 0; i < fb_helper->connector_count; i++) { + connector = fb_helper->connector_info[i]->connector; + enabled[i] = drm_connector_enabled(connector, false); + } +} + +static bool drm_target_cloned(struct drm_fb_helper *fb_helper, + struct drm_display_mode **modes, + bool *enabled, int width, int height) +{ + int count, i, j; + bool can_clone = false; + struct drm_fb_helper_connector *fb_helper_conn; + struct drm_display_mode *dmt_mode, *mode; + + /* only contemplate cloning in the single crtc case */ + if (fb_helper->crtc_count > 1) + return false; + + count = 0; + for (i = 0; i < fb_helper->connector_count; i++) { + if (enabled[i]) + count++; + } + + /* only contemplate cloning if more than one connector is enabled */ + if (count <= 1) + return false; + + /* check the command line or if nothing common pick 1024x768 */ + can_clone = true; + for (i = 0; i < fb_helper->connector_count; i++) { + if (!enabled[i]) + continue; + fb_helper_conn = fb_helper->connector_info[i]; + modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); + if (!modes[i]) { + can_clone = false; + break; + } + for (j = 0; j < i; j++) { + if (!enabled[j]) + continue; + if (!drm_mode_equal(modes[j], modes[i])) + can_clone = false; + } + } + + if (can_clone) { + DRM_DEBUG_KMS("can clone using command line\n"); + return true; + } + + /* try and find a 1024x768 mode on each connector */ + can_clone = true; + dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60); + + for (i = 0; i < fb_helper->connector_count; i++) { + + if (!enabled[i]) + continue; + + fb_helper_conn = fb_helper->connector_info[i]; + list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { + if (drm_mode_equal(mode, dmt_mode)) + modes[i] = mode; + } + if (!modes[i]) + can_clone = false; + } + + if (can_clone) { + DRM_DEBUG_KMS("can clone using 1024x768\n"); + return true; + } + DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); + return false; +} + +static bool drm_target_preferred(struct drm_fb_helper *fb_helper, + struct drm_display_mode **modes, + bool *enabled, int width, int height) +{ + struct drm_fb_helper_connector *fb_helper_conn; + int i; + + for (i = 0; i < fb_helper->connector_count; i++) { + fb_helper_conn = fb_helper->connector_info[i]; + + if (enabled[i] == false) + continue; + + DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", + fb_helper_conn->connector->base.id); + + /* got for command line mode first */ + modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); + if (!modes[i]) { + DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", + fb_helper_conn->connector->base.id); + modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); + } + /* No preferred modes, pick one off the list */ + if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) { + list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head) + break; + } + DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
modes[i]->name : + "none"); + } + return true; +} + +static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_crtc **best_crtcs, + struct drm_display_mode **modes, + int n, int width, int height) +{ + int c, o; + struct drm_device *dev = fb_helper->dev; + struct drm_connector *connector; + struct drm_connector_helper_funcs *connector_funcs; + struct drm_encoder *encoder; + struct drm_fb_helper_crtc *best_crtc; + int my_score, best_score, score; + struct drm_fb_helper_crtc **crtcs, *crtc; + struct drm_fb_helper_connector *fb_helper_conn; + + if (n == fb_helper->connector_count) + return 0; + + fb_helper_conn = fb_helper->connector_info[n]; + connector = fb_helper_conn->connector; + + best_crtcs[n] = NULL; + best_crtc = NULL; + best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); + if (modes[n] == NULL) + return best_score; + + crtcs = kzalloc(dev->mode_config.num_connector * + sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); + if (!crtcs) + return best_score; + + my_score = 1; + if (connector->status == connector_status_connected) + my_score++; + if (drm_has_cmdline_mode(fb_helper_conn)) + my_score++; + if (drm_has_preferred_mode(fb_helper_conn, width, height)) + my_score++; + + connector_funcs = connector->helper_private; + encoder = connector_funcs->best_encoder(connector); + if (!encoder) + goto out; + + /* select a crtc for this connector and then attempt to configure + remaining connectors */ + for (c = 0; c < fb_helper->crtc_count; c++) { + crtc = &fb_helper->crtc_info[c]; + + if ((encoder->possible_crtcs & (1 << c)) == 0) { + continue; + } + + for (o = 0; o < n; o++) + if (best_crtcs[o] == crtc) + break; + + if (o < n) { + /* ignore cloning unless only a single crtc */ + if (fb_helper->crtc_count > 1) + continue; + + if (!drm_mode_equal(modes[o], modes[n])) + continue; + } + + crtcs[n] = crtc; + memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *)); + score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, + width, height); + if (score > best_score) { + best_crtc = crtc; + best_score = score; + memcpy(best_crtcs, crtcs, + dev->mode_config.num_connector * + sizeof(struct drm_fb_helper_crtc *)); + } + } +out: + kfree(crtcs); + return best_score; +} + +static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_fb_helper_crtc **crtcs; + struct drm_display_mode **modes; + struct drm_encoder *encoder; + struct drm_mode_set *modeset; + bool *enabled; + int width, height; + int i, ret; + + DRM_DEBUG_KMS("\n"); + + width = dev->mode_config.max_width; + height = dev->mode_config.max_height; + + /* clean out all the encoder/crtc combos */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + encoder->crtc = NULL; + } + + crtcs = kcalloc(dev->mode_config.num_connector, + sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); + modes = kcalloc(dev->mode_config.num_connector, + sizeof(struct drm_display_mode *), GFP_KERNEL); + enabled = kcalloc(dev->mode_config.num_connector, + sizeof(bool), GFP_KERNEL); + + drm_enable_connectors(fb_helper, enabled); + + ret = drm_target_cloned(fb_helper, modes, enabled, width, height); + if (!ret) { + ret = drm_target_preferred(fb_helper, modes, enabled, width, height); + if (!ret) + DRM_ERROR("Unable to find initial modes\n"); + } + + DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); + + drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); + + /* need to set the modesets up here for use later 
*/ + /* fill out the connector<->crtc mappings into the modesets */ + for (i = 0; i < fb_helper->crtc_count; i++) { + modeset = &fb_helper->crtc_info[i].mode_set; + modeset->num_connectors = 0; + } + + for (i = 0; i < fb_helper->connector_count; i++) { + struct drm_display_mode *mode = modes[i]; + struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; + + if (mode && fb_crtc) { + modeset = &fb_crtc->mode_set; + DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", + mode->name, fb_crtc->mode_set.crtc->base.id); + fb_crtc->desired_mode = mode; + if (modeset->mode) + drm_mode_destroy(dev, modeset->mode); + modeset->mode = drm_mode_duplicate(dev, + fb_crtc->desired_mode); + modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; + } + } + + kfree(crtcs); + kfree(modes); + kfree(enabled); +} + +/** + * drm_fb_helper_initial_config - setup a sane initial connector configuration + * @fb_helper: fb_helper device struct + * @bpp_sel: bpp value to use for the framebuffer configuration + * + * LOCKING: + * Called at init time, must take mode config lock. + * + * Scan the CRTCs and connectors and try to put together an initial setup. + * At the moment, this is a cloned configuration across all heads with + * a new framebuffer object as the backing store. + * + * RETURNS: + * Zero if everything went ok, nonzero otherwise. + */ +bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) +{ + struct drm_device *dev = fb_helper->dev; + int count = 0; + + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(fb_helper->dev); + + drm_fb_helper_parse_command_line(fb_helper); + + count = drm_fb_helper_probe_connector_modes(fb_helper, + dev->mode_config.max_width, + dev->mode_config.max_height); + /* + * we shouldn't end up with no modes here. + */ + if (count == 0) { + printk(KERN_INFO "No connectors reported connected with modes\n"); + } + drm_setup_crtcs(fb_helper); + + return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); +} +EXPORT_SYMBOL(drm_fb_helper_initial_config); + +bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +{ + int count = 0; + u32 max_width, max_height, bpp_sel; + bool bound = false, crtcs_bound = false; + struct drm_crtc *crtc; + + if (!fb_helper->fb) + return false; + + list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { + if (crtc->fb) + crtcs_bound = true; + if (crtc->fb == fb_helper->fb) + bound = true; + } + + if (!bound && crtcs_bound) { + fb_helper->delayed_hotplug = true; + return false; + } + DRM_DEBUG_KMS("\n"); + + max_width = fb_helper->fb->width; + max_height = fb_helper->fb->height; + bpp_sel = fb_helper->fb->bits_per_pixel; + + count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, + max_height); + drm_setup_crtcs(fb_helper); + + return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); +} +EXPORT_SYMBOL(drm_fb_helper_hotplug_event); + diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 08d14df3bb42..e7aace20981f 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -36,6 +36,7 @@ #include "drmP.h" #include <linux/poll.h> +#include <linux/slab.h> #include <linux/smp_lock.h> static int drm_open_helper(struct inode *inode, struct file *filp, @@ -140,14 +141,16 @@ int drm_open(struct inode *inode, struct file *filp) spin_unlock(&dev->count_lock); } out: - mutex_lock(&dev->struct_mutex); - if (minor->type == DRM_MINOR_LEGACY) { - BUG_ON((dev->dev_mapping != NULL) && - (dev->dev_mapping != inode->i_mapping)); - if (dev->dev_mapping == NULL) - dev->dev_mapping = 
inode->i_mapping; + if (!retcode) { + mutex_lock(&dev->struct_mutex); + if (minor->type == DRM_MINOR_LEGACY) { + if (dev->dev_mapping == NULL) + dev->dev_mapping = inode->i_mapping; + else if (dev->dev_mapping != inode->i_mapping) + retcode = -ENODEV; + } + mutex_unlock(&dev->struct_mutex); } - mutex_unlock(&dev->struct_mutex); return retcode; } @@ -240,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp, DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); - priv = kmalloc(sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - memset(priv, 0, sizeof(*priv)); filp->private_data = priv; priv->filp = filp; priv->uid = current_euid(); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index aa89d4b0b4c4..33dad3fa6043 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev) } /** + * Initialize an already allocated GEM object of the specified size with + * shmfs backing store. + */ +int drm_gem_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size) +{ + BUG_ON((size & (PAGE_SIZE - 1)) != 0); + + obj->dev = dev; + obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); + if (IS_ERR(obj->filp)) + return -ENOMEM; + + kref_init(&obj->refcount); + kref_init(&obj->handlecount); + obj->size = size; + + atomic_inc(&dev->object_count); + atomic_add(obj->size, &dev->object_memory); + + return 0; +} +EXPORT_SYMBOL(drm_gem_object_init); + +/** * Allocate a GEM object of the specified size with shmfs backing store */ struct drm_gem_object * @@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) { struct drm_gem_object *obj; - BUG_ON((size & (PAGE_SIZE - 1)) != 0); - obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) goto free; - obj->dev = dev; - obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); - if (IS_ERR(obj->filp)) + if (drm_gem_object_init(dev, obj, size) != 0) goto free; - kref_init(&obj->refcount); - kref_init(&obj->handlecount); - obj->size = size; if (dev->driver->gem_init_object != NULL && dev->driver->gem_init_object(obj) != 0) { goto fput; } - atomic_inc(&dev->object_count); - atomic_add(obj->size, &dev->object_memory); return obj; fput: + /* drm_gem_object_init() mangles the global counters - readjust them. */ + atomic_dec(&dev->object_count); + atomic_sub(obj->size, &dev->object_memory); fput(obj->filp); free: kfree(obj); @@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) idr_destroy(&file_private->object_idr); } -static void -drm_gem_object_free_common(struct drm_gem_object *obj) +void +drm_gem_object_release(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; fput(obj->filp); atomic_dec(&dev->object_count); atomic_sub(obj->size, &dev->object_memory); - kfree(obj); } +EXPORT_SYMBOL(drm_gem_object_release); /** * Called after the last reference to the object has been lost. 
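The drm_gem.c hunks above split object setup out of drm_gem_object_alloc() into drm_gem_object_init() and drm_gem_object_release(), so a driver can embed the GEM object inside its own buffer-object structure instead of heap-allocating it separately; the i915 hunks later in this patch rely on exactly this through to_intel_bo() and obj_priv->base. A minimal sketch of the intended pattern (hypothetical_bo and its helpers are illustrative names, not part of the patch):

struct hypothetical_bo {
	struct drm_gem_object base;	/* embedded, no longer a pointer */
	u32 flags;
};

static struct hypothetical_bo *
hypothetical_bo_create(struct drm_device *dev, size_t size)
{
	struct hypothetical_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (bo == NULL)
		return NULL;

	/* size must be page aligned; drm_gem_object_init() BUG()s otherwise */
	if (drm_gem_object_init(dev, &bo->base, size) != 0) {
		kfree(bo);
		return NULL;
	}
	return bo;
}

static void hypothetical_bo_free(struct hypothetical_bo *bo)
{
	/* releases the shmem backing store and the object accounting */
	drm_gem_object_release(&bo->base);
	kfree(bo);
}

Note that drm_gem_object_release() no longer kfree()s the object itself (the kfree() is dropped from the old drm_gem_object_free_common()), so freeing the containing structure becomes the responsibility of the driver's gem_free_object() path.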
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref) if (dev->driver->gem_free_object != NULL) dev->driver->gem_free_object(obj); - - drm_gem_object_free_common(obj); } EXPORT_SYMBOL(drm_gem_object_free); @@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref) dev->driver->gem_free_object(obj); mutex_unlock(&dev->struct_mutex); } - - drm_gem_object_free_common(obj); } EXPORT_SYMBOL(drm_gem_object_free_unlocked); diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index f36b21c5b2e1..a93d7b4ddaa6 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -35,6 +35,7 @@ #include "drmP.h" #include "drm_hashtab.h" #include <linux/hash.h> +#include <linux/slab.h> int drm_ht_create(struct drm_open_hash *ht, unsigned int order) { diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index b98384dbd9a7..a263b7070fc6 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -36,6 +36,7 @@ #include "drmP.h" #include <linux/interrupt.h> /* For task queue support */ +#include <linux/slab.h> #include <linux/vgaarb.h> /** @@ -475,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc) unsigned long irqflags; spin_lock_irqsave(&dev->vbl_lock, irqflags); + dev->driver->disable_vblank(dev, crtc); DRM_WAKEUP(&dev->vbl_queue[crtc]); dev->vblank_enabled[crtc] = 0; dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index e4865f99989c..7732268eced2 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -77,7 +77,7 @@ static void *agp_remap(unsigned long offset, unsigned long size, && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size)) break; - if (!agpmem) + if (&agpmem->head == &dev->agp->memory) return NULL; /* diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 76d63394c776..f1f473ea97d3 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; /* 18/16. Find actual vertical frame frequency */ /* ignore - just set the mode flag for interlaced */ - if (interlaced) + if (interlaced) { drm_mode->vtotal *= 2; + drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; + } /* Fill the mode line name */ drm_mode_set_name(drm_mode); if (reduced) @@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, else drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NHSYNC); - if (interlaced) - drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; - return drm_mode; + return drm_mode; } EXPORT_SYMBOL(drm_cvt_mode); /** - * drm_gtf_mode - create the modeline based on GTF algorithm + * drm_gtf_mode_complex - create the modeline based on full GTF algorithm * * @dev :drm device * @hdisplay :hdisplay size * @vdisplay :vdisplay size * @vrefresh :vrefresh rate. * @interlaced :whether the interlace is supported - * @margins :whether the margin is supported + * @margins :desired margin size + * @GTF_[MCKJ] :extended GTF formula parameters * * LOCKING. * none. * - * return the modeline based on GTF algorithm - * - * This function is to create the modeline based on the GTF algorithm. - * Generalized Timing Formula is derived from: - * GTF Spreadsheet by Andy Morrish (1/5/97) - * available at http://www.vesa.org + * return the modeline based on full GTF algorithm. 
* - * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. - * What I have done is to translate it by using integer calculation. - * I also refer to the function of fb_get_mode in the file of - * drivers/video/fbmon.c + * GTF feature blocks specify C and J in multiples of 0.5, so we pass them + * in here multiplied by two. For a C of 40, pass in 80. */ -struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, - int vdisplay, int vrefresh, - bool interlaced, int margins) -{ - /* 1) top/bottom margin size (% of height) - default: 1.8, */ +struct drm_display_mode * +drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, + int vrefresh, bool interlaced, int margins, + int GTF_M, int GTF_2C, int GTF_K, int GTF_2J) +{ /* 1) top/bottom margin size (% of height) - default: 1.8, */ #define GTF_MARGIN_PERCENTAGE 18 /* 2) character cell horizontal granularity (pixels) - default 8 */ #define GTF_CELL_GRAN 8 @@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, #define H_SYNC_PERCENT 8 /* min time of vsync + back porch (microsec) */ #define MIN_VSYNC_PLUS_BP 550 - /* blanking formula gradient */ -#define GTF_M 600 - /* blanking formula offset */ -#define GTF_C 40 - /* blanking formula scaling factor */ -#define GTF_K 128 - /* blanking formula scaling factor */ -#define GTF_J 20 /* C' and M' are part of the Blanking Duty Cycle computation */ -#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J) -#define GTF_M_PRIME (GTF_K * GTF_M / 256) +#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2) +#define GTF_M_PRIME (GTF_K * GTF_M / 256) struct drm_display_mode *drm_mode; unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; int top_margin, bottom_margin; @@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, drm_mode->clock = pixel_freq; - drm_mode_set_name(drm_mode); - drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; - if (interlaced) { drm_mode->vtotal *= 2; drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; } + drm_mode_set_name(drm_mode); + if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40) + drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; + else + drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC; + return drm_mode; } +EXPORT_SYMBOL(drm_gtf_mode_complex); + +/** + * drm_gtf_mode - create the modeline based on GTF algorithm + * + * @dev :drm device + * @hdisplay :hdisplay size + * @vdisplay :vdisplay size + * @vrefresh :vrefresh rate. + * @interlaced :whether the interlace is supported + * @margins :whether the margin is supported + * + * LOCKING. + * none. + * + * return the modeline based on GTF algorithm + * + * This function is to create the modeline based on the GTF algorithm. + * Generalized Timing Formula is derived from: + * GTF Spreadsheet by Andy Morrish (1/5/97) + * available at http://www.vesa.org + * + * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. + * What I have done is to translate it by using integer calculation. 
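+ * (An illustrative call, not part of this patch: the standard parameters
+ * are M=600, C=40, K=128 and J=20, with C and J passed in doubled, so
+ * drm_gtf_mode_complex(dev, 1024, 768, 60, false, 0, 600, 80, 128, 40)
+ * produces the same modeline as drm_gtf_mode(dev, 1024, 768, 60, false, 0).)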
+ * I also refer to the function of fb_get_mode in the file of + * drivers/video/fbmon.c + * + * Standard GTF parameters: + * M = 600 + * C = 40 + * K = 128 + * J = 20 + */ +struct drm_display_mode * +drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, + bool lace, int margins) +{ + return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace, + margins, 600, 40 * 2, 128, 20 * 2); +} EXPORT_SYMBOL(drm_gtf_mode); + /** * drm_mode_set_name - set the name on a mode * @mode: name will be set in this mode @@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode); */ void drm_mode_set_name(struct drm_display_mode *mode) { - snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, - mode->vdisplay); + bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + + snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s", + mode->hdisplay, mode->vdisplay, + interlaced ? "i" : ""); } EXPORT_SYMBOL(drm_mode_set_name); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index e68ebf92fa2a..2ea9ad4a8d69 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -37,6 +37,7 @@ */ #include <linux/pci.h> +#include <linux/slab.h> #include <linux/dma-mapping.h> #include "drmP.h" diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index d379c4f2892f..a9ba6b69ad35 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -38,6 +38,7 @@ */ #include <linux/seq_file.h> +#include <linux/slab.h> #include "drmP.h" /*************************************************** diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index c7823c863d4f..9034c4c6100d 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -32,6 +32,7 @@ */ #include <linux/vmalloc.h> +#include <linux/slab.h> #include "drmP.h" #define DEBUG_SCATTER 0 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index ad73e141afdb..a0c365f2e521 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/slab.h> #include "drmP.h" #include "drm_core.h" @@ -515,8 +516,6 @@ void drm_put_dev(struct drm_device *dev) } driver = dev->driver; - drm_vblank_cleanup(dev); - drm_lastclose(dev); if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && @@ -536,6 +535,8 @@ void drm_put_dev(struct drm_device *dev) dev->agp = NULL; } + drm_vblank_cleanup(dev); + list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_rmmap(dev, r_list->map); drm_ht_remove(&dev->map_hash); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 014ce24761b9..101d381e9d86 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -14,6 +14,7 @@ #include <linux/device.h> #include <linux/kdev_t.h> +#include <linux/gfp.h> #include <linux/err.h> #include "drm_sysfs.h" @@ -192,8 +193,9 @@ static ssize_t enabled_show(struct device *device, "disabled"); } -static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static ssize_t edid_show(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) { struct device *connector_dev = container_of(kobj, struct device, kobj); struct drm_connector *connector = to_drm_connector(connector_dev); @@ -332,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = { static struct bin_attribute edid_attr = { .attr.name = "edid", .attr.mode = 
0444, - .size = 128, + .size = 0, .read = edid_show, }; @@ -353,7 +355,10 @@ static struct bin_attribute edid_attr = { int drm_sysfs_connector_add(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - int ret = 0, i, j; + int attr_cnt = 0; + int opt_cnt = 0; + int i; + int ret = 0; /* We shouldn't get called more than once for the same connector */ BUG_ON(device_is_registered(&connector->kdev)); @@ -376,8 +381,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector) /* Standard attributes */ - for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) { - ret = device_create_file(&connector->kdev, &connector_attrs[i]); + for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) { + ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]); if (ret) goto err_out_files; } @@ -393,8 +398,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector) case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: case DRM_MODE_CONNECTOR_TV: - for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) { - ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]); + for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) { + ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]); if (ret) goto err_out_files; } @@ -413,10 +418,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector) return 0; err_out_files: - if (i > 0) - for (j = 0; j < i; j++) - device_remove_file(&connector->kdev, - &connector_attrs[i]); + for (i = 0; i < opt_cnt; i++) + device_remove_file(&connector->kdev, &connector_attrs_opt1[i]); + for (i = 0; i < attr_cnt; i++) + device_remove_file(&connector->kdev, &connector_attrs[i]); device_unregister(&connector->kdev); out: diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 4ac900f4647f..c3b13fb41d0c 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -36,6 +36,7 @@ #include "drmP.h" #if defined(__ia64__) #include <linux/efi.h> +#include <linux/slab.h> #endif static void drm_vm_open(struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index de32d22a8c39..997d91707ad2 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -36,6 +36,7 @@ #include "i810_drv.h" #include <linux/interrupt.h> /* For task queue support */ #include <linux/delay.h> +#include <linux/slab.h> #include <linux/pagemap.h> #define I810_BUF_FREE 2 diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 06bd732e6463..65759a9a85c8 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c @@ -38,6 +38,7 @@ #include <linux/interrupt.h> /* For task queue support */ #include <linux/pagemap.h> #include <linux/delay.h> +#include <linux/slab.h> #include <asm/uaccess.h> #define I830_BUF_FREE 2 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 9929f84ec3e1..da78f2c0d909 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_fb.o \ intel_tv.o \ intel_dvo.o \ + intel_ringbuffer.o \ intel_overlay.o \ dvo_ch7xxx.o \ dvo_ch7017.o \ @@ -33,3 +34,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o i915-$(CONFIG_COMPAT) += i915_ioc32.o obj-$(CONFIG_DRM_I915) += i915.o + +CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 288fc50627e2..0d6ff640e1c6 100644 --- 
a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -70,16 +70,6 @@ struct intel_dvo_dev_ops { void (*dpms)(struct intel_dvo_device *dvo, int mode); /* - * Saves the output's state for restoration on VT switch. - */ - void (*save)(struct intel_dvo_device *dvo); - - /* - * Restore's the output's state at VT switch. - */ - void (*restore)(struct intel_dvo_device *dvo); - - /* * Callback for testing a video mode for a given output. * * This function should only check for cases where a mode can't diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 1184c14ba87d..14d59804acd7 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -159,16 +159,7 @@ #define CH7017_BANG_LIMIT_CONTROL 0x7f struct ch7017_priv { - uint8_t save_hapi; - uint8_t save_vali; - uint8_t save_valo; - uint8_t save_ailo; - uint8_t save_lvds_pll_vco; - uint8_t save_feedback_div; - uint8_t save_lvds_control_2; - uint8_t save_outputs_enable; - uint8_t save_lvds_power_down; - uint8_t save_power_management; + uint8_t dummy; }; static void ch7017_dump_regs(struct intel_dvo_device *dvo); @@ -401,39 +392,6 @@ do { \ DUMP(CH7017_LVDS_POWER_DOWN); } -static void ch7017_save(struct intel_dvo_device *dvo) -{ - struct ch7017_priv *priv = dvo->dev_priv; - - ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi); - ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo); - ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo); - ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco); - ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div); - ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2); - ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable); - ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down); - ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management); -} - -static void ch7017_restore(struct intel_dvo_device *dvo) -{ - struct ch7017_priv *priv = dvo->dev_priv; - - /* Power down before changing mode */ - ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); - - ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi); - ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo); - ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo); - ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco); - ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div); - ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2); - ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable); - ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down); - ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management); -} - static void ch7017_destroy(struct intel_dvo_device *dvo) { struct ch7017_priv *priv = dvo->dev_priv; @@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = { .mode_set = ch7017_mode_set, .dpms = ch7017_dpms, .dump_regs = ch7017_dump_regs, - .save = ch7017_save, - .restore = ch7017_restore, .destroy = ch7017_destroy, }; diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index d56ff5cc22b2..6f1944b24441 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -92,21 +92,10 @@ static struct ch7xxx_id_struct { { CH7301_VID, "CH7301" }, }; -struct ch7xxx_reg_state { - uint8_t regs[CH7xxx_NUM_REGS]; -}; - struct ch7xxx_priv { bool quiet; - - struct 
ch7xxx_reg_state save_reg; - struct ch7xxx_reg_state mode_reg; - uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT; - uint8_t save_TLPF, save_TCT, save_PM, save_IDF; }; -static void ch7xxx_save(struct intel_dvo_device *dvo); - static char *ch7xxx_get_id(uint8_t vid) { int i; @@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) { - struct ch7xxx_priv *ch7xxx = dvo->dev_priv; int i; for (i = 0; i < CH7xxx_NUM_REGS; i++) { + uint8_t val; if ((i % 8) == 0 ) DRM_LOG_KMS("\n %02X: ", i); - DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); + ch7xxx_readb(dvo, i, &val); + DRM_LOG_KMS("%02X ", val); } } -static void ch7xxx_save(struct intel_dvo_device *dvo) -{ - struct ch7xxx_priv *ch7xxx= dvo->dev_priv; - - ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL); - ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP); - ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD); - ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT); - ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF); - ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM); - ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF); -} - -static void ch7xxx_restore(struct intel_dvo_device *dvo) -{ - struct ch7xxx_priv *ch7xxx = dvo->dev_priv; - - ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL); - ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP); - ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD); - ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT); - ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF); - ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF); - ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM); -} - static void ch7xxx_destroy(struct intel_dvo_device *dvo) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; @@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = { .mode_set = ch7xxx_mode_set, .dpms = ch7xxx_dpms, .dump_regs = ch7xxx_dump_regs, - .save = ch7xxx_save, - .restore = ch7xxx_restore, .destroy = ch7xxx_destroy, }; diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index 24169e528f0f..a2ec3f487202 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c @@ -153,9 +153,6 @@ struct ivch_priv { bool quiet; uint16_t width, height; - - uint16_t save_VR01; - uint16_t save_VR40; }; @@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo) DRM_LOG_KMS("VR8F: 0x%04x\n", val); } -static void ivch_save(struct intel_dvo_device *dvo) -{ - struct ivch_priv *priv = dvo->dev_priv; - - ivch_read(dvo, VR01, &priv->save_VR01); - ivch_read(dvo, VR40, &priv->save_VR40); -} - -static void ivch_restore(struct intel_dvo_device *dvo) -{ - struct ivch_priv *priv = dvo->dev_priv; - - ivch_write(dvo, VR01, priv->save_VR01); - ivch_write(dvo, VR40, priv->save_VR40); -} - static void ivch_destroy(struct intel_dvo_device *dvo) { struct ivch_priv *priv = dvo->dev_priv; @@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo) struct intel_dvo_dev_ops ivch_ops= { .init = ivch_init, .dpms = ivch_dpms, - .save = ivch_save, - .restore = ivch_restore, .mode_valid = ivch_mode_valid, .mode_set = ivch_mode_set, .detect = ivch_detect, diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 0001c13f0a80..9b8e6765cf26 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define SIL164_REGC 0x0c -struct sil164_save_rec { - uint8_t reg8; - uint8_t reg9; - uint8_t regc; -}; - struct sil164_priv { //I2CDevRec d; bool quiet; - struct sil164_save_rec save_regs; - struct sil164_save_rec mode_regs; }; #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) @@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo) DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); } -static void sil164_save(struct intel_dvo_device *dvo) -{ - struct sil164_priv *sil= dvo->dev_priv; - - if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8)) - return; - - if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9)) - return; - - if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc)) - return; - - return; -} - -static void sil164_restore(struct intel_dvo_device *dvo) -{ - struct sil164_priv *sil = dvo->dev_priv; - - /* Restore it powered down initially */ - sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1); - - sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9); - sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc); - sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8); -} - static void sil164_destroy(struct intel_dvo_device *dvo) { struct sil164_priv *sil = dvo->dev_priv; @@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = { .mode_set = sil164_mode_set, .dpms = sil164_dpms, .dump_regs = sil164_dump_regs, - .save = sil164_save, - .restore = sil164_restore, .destroy = sil164_destroy, }; diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index c7c391bc116a..56f66426207f 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -86,16 +86,8 @@ #define TFP410_V_RES_LO 0x3C #define TFP410_V_RES_HI 0x3D -struct tfp410_save_rec { - uint8_t ctl1; - uint8_t ctl2; -}; - struct tfp410_priv { bool quiet; - - struct tfp410_save_rec saved_reg; - struct tfp410_save_rec mode_reg; }; static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) @@ -216,7 +208,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) uint8_t ctl2; if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { - if (ctl2 & TFP410_CTL_2_HTPLG) + if (ctl2 & TFP410_CTL_2_RSEN) ret = connector_status_connected; else ret = connector_status_disconnected; @@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo) DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); } -static void tfp410_save(struct intel_dvo_device *dvo) -{ - struct tfp410_priv *tfp = dvo->dev_priv; - - if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1)) - return; - - if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2)) - return; -} - -static void tfp410_restore(struct intel_dvo_device *dvo) -{ - struct tfp410_priv *tfp = dvo->dev_priv; - - /* Restore it powered down initially */ - tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1); - - tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2); - tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1); -} - static void tfp410_destroy(struct intel_dvo_device *dvo) { struct tfp410_priv *tfp = dvo->dev_priv; @@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = { .mode_set = tfp410_mode_set, .dpms = tfp410_dpms, .dump_regs = tfp410_dump_regs, - .save = tfp410_save, - .restore = tfp410_restore, .destroy = tfp410_destroy, }; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1376dfe44c95..9214119c0154 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -28,6 +28,7 
@@ #include <linux/seq_file.h> #include <linux/debugfs.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -76,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) case ACTIVE_LIST: seq_printf(m, "Active:\n"); lock = &dev_priv->mm.active_list_lock; - head = &dev_priv->mm.active_list; + head = &dev_priv->render_ring.active_list; break; case INACTIVE_LIST: seq_printf(m, "Inactive:\n"); @@ -95,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) spin_lock(lock); list_for_each_entry(obj_priv, head, list) { - struct drm_gem_object *obj = obj_priv->obj; - seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s", - obj, + &obj_priv->base, get_pin_flag(obj_priv), - obj->size, - obj->read_domains, obj->write_domain, + obj_priv->base.size, + obj_priv->base.read_domains, + obj_priv->base.write_domain, obj_priv->last_rendering_seqno, obj_priv->dirty ? " dirty" : "", obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : ""); - if (obj->name) - seq_printf(m, " (name: %d)", obj->name); + if (obj_priv->base.name) + seq_printf(m, " (name: %d)", obj_priv->base.name); if (obj_priv->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj_priv->fence_reg); if (obj_priv->gtt_space != NULL) @@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_i915_gem_request *gem_request; seq_printf(m, "Request:\n"); - list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) { + list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, + list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, (int) (jiffies - gem_request->emitted_jiffies)); @@ -143,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - if (dev_priv->hw_status_page != NULL) { + if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - i915_get_gem_seqno(dev)); + i915_get_gem_seqno(dev, &dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } @@ -195,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - if (dev_priv->hw_status_page != NULL) { + if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - i915_get_gem_seqno(dev)); + i915_get_gem_seqno(dev, &dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } @@ -225,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) } else { struct drm_i915_gem_object *obj_priv; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); seq_printf(m, "Fenced object[%2d] = %p: %s " "%08x %08zx %08x %s %08x %08x %d", i, obj, get_pin_flag(obj_priv), @@ -251,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data) int i; volatile u32 *hws; - hws = (volatile u32 *)dev_priv->hw_status_page; + hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; if (hws == NULL) return 0; @@ -287,8 +288,9 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) spin_lock(&dev_priv->mm.active_list_lock); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { - obj = obj_priv->obj; + list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, + list) { + obj = &obj_priv->base; if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { ret = 
i915_gem_object_get_pages(obj, 0); if (ret) { @@ -317,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) u8 *virt; uint32_t *ptr, off; - if (!dev_priv->ring.ring_obj) { + if (!dev_priv->render_ring.gem_object) { seq_printf(m, "No ringbuffer setup\n"); return 0; } - virt = dev_priv->ring.virtual_start; + virt = dev_priv->render_ring.virtual_start; - for (off = 0; off < dev_priv->ring.Size; off += 4) { + for (off = 0; off < dev_priv->render_ring.size; off += 4) { ptr = (uint32_t *)(virt + off); seq_printf(m, "%08x : %08x\n", off, *ptr); } @@ -344,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, "RingHead : %08x\n", head); seq_printf(m, "RingTail : %08x\n", tail); - seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); + seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); return 0; @@ -489,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; u16 rgvswctl = I915_READ16(MEMSWCTL); + u16 rgvstat = I915_READ16(MEMSTAT_ILK); - seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3); - seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1); - seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf, - rgvswctl & 0x3f); + seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); + seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); + seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> + MEMSTAT_VID_SHIFT); + seq_printf(m, "Current P-state: %d\n", + (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); return 0; } @@ -508,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) for (i = 0; i < 16; i++) { delayfreq = I915_READ(PXVFREQ_BASE + i * 4); - seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq); + seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, + (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); } return 0; @@ -541,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 rgvmodectl = I915_READ(MEMMODECTL); + u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY); + u16 crstandvid = I915_READ16(CRSTANDVID); seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? "yes" : "no"); @@ -555,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused) rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); seq_printf(m, "Starting frequency: P%d\n", (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); - seq_printf(m, "Max frequency: P%d\n", + seq_printf(m, "Max P-state: P%d\n", (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); - seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); + seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); + seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); + seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); + seq_printf(m, "Render standby enabled: %s\n", + (rstdbyctl & RCX_SW_EXIT) ? 
"no" : "yes"); return 0; } @@ -566,23 +578,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_crtc *crtc; drm_i915_private_t *dev_priv = dev->dev_private; - bool fbc_enabled = false; - if (!dev_priv->display.fbc_enabled) { + if (!I915_HAS_FBC(dev)) { seq_printf(m, "FBC unsupported on this chipset\n"); return 0; } - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (!crtc->enabled) - continue; - if (dev_priv->display.fbc_enabled(crtc)) - fbc_enabled = true; - } - - if (fbc_enabled) { + if (intel_fbc_enabled(dev)) { seq_printf(m, "FBC enabled\n"); } else { seq_printf(m, "FBC disabled: "); @@ -602,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) case FBC_NOT_TILED: seq_printf(m, "scanout buffer not tiled"); break; + case FBC_MULTIPLE_PIPES: + seq_printf(m, "multiple pipes are enabled"); + break; default: seq_printf(m, "unknown reason"); } @@ -617,7 +623,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; bool sr_enabled = false; - if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev)) + if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; @@ -630,6 +636,36 @@ static int i915_sr_status(struct seq_file *m, void *unused) return 0; } +static int i915_emon_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long temp, chipset, gfx; + + temp = i915_mch_val(dev_priv); + chipset = i915_chipset_val(dev_priv); + gfx = i915_gfx_val(dev_priv); + + seq_printf(m, "GMCH temp: %ld\n", temp); + seq_printf(m, "Chipset power: %ld\n", chipset); + seq_printf(m, "GFX power: %ld\n", gfx); + seq_printf(m, "Total power: %ld\n", chipset + gfx); + + return 0; +} + +static int i915_gfxec(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); + + return 0; +} + static int i915_wedged_open(struct inode *inode, struct file *filp) @@ -752,6 +788,8 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_delayfreq_table", i915_delayfreq_table, 0}, {"i915_inttoext_table", i915_inttoext_table, 0}, {"i915_drpc_info", i915_drpc_info, 0}, + {"i915_emon_status", i915_emon_status, 0}, + {"i915_gfxec", i915_gfxec, 0}, {"i915_fbc_status", i915_fbc_status, 0}, {"i915_sr_status", i915_sr_status, 0}, }; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8bfc0bbf13e6..2305a1234f1e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -38,84 +38,7 @@ #include <linux/acpi.h> #include <linux/pnp.h> #include <linux/vga_switcheroo.h> - -/* Really want an OS-independent resettable timer. Would like to have - * this loop run for (eg) 3 sec, but have the timer reset every time - * the head pointer changes, so that EBUSY only happens if the ring - * actually stalls for (eg) 3 seconds. 
- */ -int i915_wait_ring(struct drm_device * dev, int n, const char *caller) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_ring_buffer_t *ring = &(dev_priv->ring); - u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; - u32 last_acthd = I915_READ(acthd_reg); - u32 acthd; - u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - int i; - - trace_i915_ring_wait_begin (dev); - - for (i = 0; i < 100000; i++) { - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - acthd = I915_READ(acthd_reg); - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->Size; - if (ring->space >= n) { - trace_i915_ring_wait_end (dev); - return 0; - } - - if (dev->primary->master) { - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - if (master_priv->sarea_priv) - master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - } - - - if (ring->head != last_head) - i = 0; - if (acthd != last_acthd) - i = 0; - - last_head = ring->head; - last_acthd = acthd; - msleep_interruptible(10); - - } - - trace_i915_ring_wait_end (dev); - return -EBUSY; -} - -/* As a ringbuffer is only allowed to wrap between instructions, fill - * the tail with NOOPs. - */ -int i915_wrap_ring(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - volatile unsigned int *virt; - int rem; - - rem = dev_priv->ring.Size - dev_priv->ring.tail; - if (dev_priv->ring.space < rem) { - int ret = i915_wait_ring(dev, rem, __func__); - if (ret) - return ret; - } - dev_priv->ring.space -= rem; - - virt = (unsigned int *) - (dev_priv->ring.virtual_start + dev_priv->ring.tail); - rem /= 4; - while (rem--) - *virt++ = MI_NOOP; - - dev_priv->ring.tail = 0; - - return 0; -} +#include <linux/slab.h> /** * Sets up the hardware status page for devices that need a physical address @@ -132,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev) DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; + dev_priv->render_ring.status_page.page_addr + = dev_priv->status_page_dmah->vaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); if (IS_I965G(dev)) dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & @@ -158,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev) dev_priv->status_page_dmah = NULL; } - if (dev_priv->status_gfx_addr) { - dev_priv->status_gfx_addr = 0; + if (dev_priv->render_ring.status_page.gfx_addr) { + dev_priv->render_ring.status_page.gfx_addr = 0; drm_core_ioremapfree(&dev_priv->hws_map, dev); } @@ -171,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; - drm_i915_ring_buffer_t *ring = &(dev_priv->ring); + struct intel_ring_buffer *ring = &dev_priv->render_ring; /* * We should never lose context on the ring with modesetting @@ -184,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev) ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) - ring->space += ring->Size; + ring->space += ring->size; if (!dev->primary->master) return; @@ -204,12 +128,11 @@ static int i915_dma_cleanup(struct drm_device * dev) if (dev->irq_enabled) drm_irq_uninstall(dev); - if (dev_priv->ring.virtual_start) { - drm_core_ioremapfree(&dev_priv->ring.map, dev); 
- dev_priv->ring.virtual_start = NULL; - dev_priv->ring.map.handle = NULL; - dev_priv->ring.map.size = 0; - } + mutex_lock(&dev->struct_mutex); + intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); + if (HAS_BSD(dev)) + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ if (I915_NEED_GFX_HWS(dev)) @@ -232,24 +155,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } if (init->ring_size != 0) { - if (dev_priv->ring.ring_obj != NULL) { + if (dev_priv->render_ring.gem_object != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } - dev_priv->ring.Size = init->ring_size; + dev_priv->render_ring.size = init->ring_size; - dev_priv->ring.map.offset = init->ring_start; - dev_priv->ring.map.size = init->ring_size; - dev_priv->ring.map.type = 0; - dev_priv->ring.map.flags = 0; - dev_priv->ring.map.mtrr = 0; + dev_priv->render_ring.map.offset = init->ring_start; + dev_priv->render_ring.map.size = init->ring_size; + dev_priv->render_ring.map.type = 0; + dev_priv->render_ring.map.flags = 0; + dev_priv->render_ring.map.mtrr = 0; - drm_core_ioremap_wc(&dev_priv->ring.map, dev); + drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); - if (dev_priv->ring.map.handle == NULL) { + if (dev_priv->render_ring.map.handle == NULL) { i915_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -257,7 +180,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } } - dev_priv->ring.virtual_start = dev_priv->ring.map.handle; + dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; @@ -277,26 +200,29 @@ static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct intel_ring_buffer *ring; DRM_DEBUG_DRIVER("%s\n", __func__); - if (dev_priv->ring.map.handle == NULL) { + ring = &dev_priv->render_ring; + + if (ring->map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; } /* Program Hardware Status Page */ - if (!dev_priv->hw_status_page) { + if (!ring->status_page.page_addr) { DRM_ERROR("Can not find hardware status page\n"); return -EINVAL; } DRM_DEBUG_DRIVER("hw status page @ %p\n", - dev_priv->hw_status_page); - - if (dev_priv->status_gfx_addr != 0) - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); + ring->status_page.page_addr); + if (ring->status_page.gfx_addr != 0) + ring->setup_status_page(dev, ring); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; @@ -406,9 +332,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; - RING_LOCALS; - if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) + if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) return -EINVAL; BEGIN_LP_RING((dwords+1)&~1); @@ -441,9 +366,7 @@ i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4) { - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_clip_rect box = boxes[i]; - RING_LOCALS; if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", @@ -480,7 +403,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t 
*dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - RING_LOCALS; dev_priv->counter++; if (dev_priv->counter > 0x7FFFFFFFUL) @@ -534,10 +456,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects) { - drm_i915_private_t *dev_priv = dev->dev_private; int nbox = batch->num_cliprects; int i = 0, count; - RING_LOCALS; if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment"); @@ -586,7 +506,6 @@ static int i915_dispatch_flip(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - RING_LOCALS; if (!master_priv->sarea_priv) return -EINVAL; @@ -639,7 +558,8 @@ static int i915_quiescent(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; i915_kernel_lost_context(dev); - return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); + return intel_wait_ring_buffer(dev, &dev_priv->render_ring, + dev_priv->render_ring.size - 8); } static int i915_flush_ioctl(struct drm_device *dev, void *data, @@ -826,6 +746,9 @@ static int i915_getparam(struct drm_device *dev, void *data, /* depends on GEM */ value = dev_priv->has_gem; break; + case I915_PARAM_HAS_BSD: + value = HAS_BSD(dev); + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -881,6 +804,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; + struct intel_ring_buffer *ring = &dev_priv->render_ring; if (!I915_NEED_GFX_HWS(dev)) return -EINVAL; @@ -897,7 +821,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); - dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); + ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); dev_priv->hws_map.offset = dev->agp->base + hws->addr; dev_priv->hws_map.size = 4*1024; @@ -908,19 +832,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data, drm_core_ioremap_wc(&dev_priv->hws_map, dev); if (dev_priv->hws_map.handle == NULL) { i915_dma_cleanup(dev); - dev_priv->status_gfx_addr = 0; + ring->status_page.gfx_addr = 0; DRM_ERROR("can not ioremap virtual address for" " G33 hw status page\n"); return -ENOMEM; } - dev_priv->hw_status_page = dev_priv->hws_map.handle; + ring->status_page.page_addr = dev_priv->hws_map.handle; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", - dev_priv->status_gfx_addr); + ring->status_page.gfx_addr); DRM_DEBUG_DRIVER("load hws at %p\n", - dev_priv->hw_status_page); + ring->status_page.page_addr); return 0; } @@ -1307,7 +1231,7 @@ static void i915_warn_stolen(struct drm_device *dev) static void i915_setup_compression(struct drm_device *dev, int size) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mm_node *compressed_fb, *compressed_llb; + struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); unsigned long cfb_base; unsigned long ll_base = 0; @@ -1356,19 +1280,30 @@ static void i915_setup_compression(struct drm_device *dev, int size) dev_priv->cfb_size = size; + intel_disable_fbc(dev); + dev_priv->compressed_fb = compressed_fb; + if (IS_GM45(dev)) { - 
g4x_disable_fbc(dev); I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { - i8xx_disable_fbc(dev); I915_WRITE(FBC_CFB_BASE, cfb_base); I915_WRITE(FBC_LL_BASE, ll_base); + dev_priv->compressed_llb = compressed_llb; } DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, ll_base, size >> 20); } +static void i915_cleanup_compression(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + drm_mm_put_block(dev_priv->compressed_fb); + if (dev_priv->compressed_llb) + drm_mm_put_block(dev_priv->compressed_llb); +} + /* true = enable decode, false = disable decoder */ static unsigned int i915_vga_set_decode(void *cookie, bool state) { @@ -1387,12 +1322,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { - printk(KERN_INFO "i915: switched off\n"); + printk(KERN_INFO "i915: switched on\n"); /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); i915_resume(dev); + drm_kms_helper_poll_enable(dev); } else { printk(KERN_ERR "i915: switched off\n"); + drm_kms_helper_poll_disable(dev); i915_suspend(dev, pmm); } } @@ -1467,19 +1404,23 @@ static int i915_load_modeset_init(struct drm_device *dev, /* if we have > 1 VGA cards, then disable the radeon VGA resources */ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); if (ret) - goto destroy_ringbuffer; + goto cleanup_ringbuffer; ret = vga_switcheroo_register_client(dev->pdev, i915_switcheroo_set_state, i915_switcheroo_can_switch); if (ret) - goto destroy_ringbuffer; + goto cleanup_vga_client; + + /* IIR "flip pending" bit means done if this bit is set */ + if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) + dev_priv->flip_pending_is_done = true; intel_modeset_init(dev); ret = drm_irq_install(dev); if (ret) - goto destroy_ringbuffer; + goto cleanup_vga_switcheroo; /* Always safe in the mode setting case. */ /* FIXME: do pre/post-mode set stuff in core KMS code */ @@ -1491,11 +1432,20 @@ static int i915_load_modeset_init(struct drm_device *dev, I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); - drm_helper_initial_config(dev); + ret = intel_fbdev_init(dev); + if (ret) + goto cleanup_irq; + drm_kms_helper_poll_init(dev); return 0; -destroy_ringbuffer: +cleanup_irq: + drm_irq_uninstall(dev); +cleanup_vga_switcheroo: + vga_switcheroo_unregister_client(dev->pdev); +cleanup_vga_client: + vga_client_register(dev->pdev, NULL, NULL, NULL); +cleanup_ringbuffer: mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); @@ -1527,14 +1477,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) master->driver_priv = NULL; } -static void i915_get_mem_freq(struct drm_device *dev) +static void i915_pineview_get_mem_freq(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 tmp; - if (!IS_PINEVIEW(dev)) - return; - tmp = I915_READ(CLKCFG); switch (tmp & CLKCFG_FSB_MASK) { @@ -1563,8 +1510,525 @@ static void i915_get_mem_freq(struct drm_device *dev) dev_priv->mem_freq = 800; break; } + + /* detect pineview DDR3 setting */ + tmp = I915_READ(CSHRDDR3CTL); + dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; +} + +static void i915_ironlake_get_mem_freq(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u16 ddrpll, csipll; + + ddrpll = I915_READ16(DDRMPLL1); + csipll = I915_READ16(CSIPLL0); + + switch (ddrpll & 0xff) { + case 0xc: + dev_priv->mem_freq = 800; + break; + case 0x10: + dev_priv->mem_freq = 1066; + break; + case 0x14: + dev_priv->mem_freq = 1333; + break; + case 0x18: + dev_priv->mem_freq = 1600; + break; + default: + DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", + ddrpll & 0xff); + dev_priv->mem_freq = 0; + break; + } + + dev_priv->r_t = dev_priv->mem_freq; + + switch (csipll & 0x3ff) { + case 0x00c: + dev_priv->fsb_freq = 3200; + break; + case 0x00e: + dev_priv->fsb_freq = 3733; + break; + case 0x010: + dev_priv->fsb_freq = 4266; + break; + case 0x012: + dev_priv->fsb_freq = 4800; + break; + case 0x014: + dev_priv->fsb_freq = 5333; + break; + case 0x016: + dev_priv->fsb_freq = 5866; + break; + case 0x018: + dev_priv->fsb_freq = 6400; + break; + default: + DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", + csipll & 0x3ff); + dev_priv->fsb_freq = 0; + break; + } + + if (dev_priv->fsb_freq == 3200) { + dev_priv->c_m = 0; + } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { + dev_priv->c_m = 1; + } else { + dev_priv->c_m = 2; + } +} + +struct v_table { + u8 vid; + unsigned long vd; /* in .1 mil */ + unsigned long vm; /* in .1 mil */ + u8 pvid; +}; + +static struct v_table v_table[] = { + { 0, 16125, 15000, 0x7f, }, + { 1, 16000, 14875, 0x7e, }, + { 2, 15875, 14750, 0x7d, }, + { 3, 15750, 14625, 0x7c, }, + { 4, 15625, 14500, 0x7b, }, + { 5, 15500, 14375, 0x7a, }, + { 6, 15375, 14250, 0x79, }, + { 7, 15250, 14125, 0x78, }, + { 8, 15125, 14000, 0x77, }, + { 9, 15000, 13875, 0x76, }, + { 10, 14875, 13750, 0x75, }, + { 11, 14750, 13625, 0x74, }, + { 12, 14625, 13500, 0x73, }, + { 13, 14500, 13375, 0x72, }, + { 14, 14375, 13250, 0x71, }, + { 15, 14250, 13125, 0x70, }, + { 16, 14125, 13000, 0x6f, }, + { 17, 14000, 12875, 0x6e, }, + { 18, 13875, 12750, 0x6d, }, + { 19, 13750, 12625, 0x6c, }, + { 20, 13625, 12500, 0x6b, }, + { 21, 13500, 12375, 0x6a, }, + { 22, 13375, 12250, 0x69, }, + { 23, 13250, 12125, 0x68, }, + { 24, 13125, 12000, 0x67, }, + { 25, 13000, 11875, 0x66, }, + { 26, 12875, 11750, 0x65, }, + { 27, 12750, 11625, 0x64, }, + { 28, 12625, 11500, 0x63, }, + { 29, 12500, 11375, 0x62, }, + { 30, 12375, 11250, 0x61, }, + { 31, 12250, 11125, 0x60, }, + { 32, 12125, 11000, 0x5f, }, + { 33, 12000, 10875, 0x5e, }, + { 34, 11875, 10750, 0x5d, }, + { 35, 11750, 10625, 0x5c, }, + { 36, 11625, 10500, 0x5b, }, + { 37, 11500, 10375, 0x5a, }, + { 38, 11375, 10250, 0x59, }, + { 39, 11250, 10125, 0x58, }, + { 40, 11125, 10000, 0x57, }, + { 41, 11000, 9875, 0x56, }, + { 42, 10875, 9750, 0x55, }, + { 43, 10750, 9625, 0x54, }, + { 44, 10625, 9500, 0x53, }, + { 45, 10500, 9375, 0x52, }, + { 46, 10375, 9250, 0x51, }, + { 47, 10250, 9125, 0x50, }, + { 48, 10125, 9000, 0x4f, }, + { 49, 10000, 8875, 0x4e, }, + { 50, 9875, 8750, 0x4d, }, + { 51, 9750, 8625, 0x4c, }, + { 52, 9625, 8500, 0x4b, }, + { 53, 9500, 8375, 0x4a, }, + { 54, 9375, 8250, 0x49, }, + { 55, 9250, 8125, 0x48, }, + { 56, 9125, 8000, 0x47, }, + { 57, 9000, 7875, 0x46, }, + { 58, 8875, 7750, 0x45, }, + { 59, 8750, 7625, 0x44, }, + { 60, 8625, 7500, 0x43, }, + { 61, 8500, 7375, 0x42, }, + { 62, 8375, 7250, 0x41, }, + { 63, 8250, 7125, 0x40, }, + { 64, 8125, 7000, 0x3f, }, + { 65, 8000, 6875, 0x3e, }, + { 66, 7875, 6750, 0x3d, }, + { 67, 7750, 6625, 0x3c, }, + { 68, 7625, 6500, 
0x3b, }, + { 69, 7500, 6375, 0x3a, }, + { 70, 7375, 6250, 0x39, }, + { 71, 7250, 6125, 0x38, }, + { 72, 7125, 6000, 0x37, }, + { 73, 7000, 5875, 0x36, }, + { 74, 6875, 5750, 0x35, }, + { 75, 6750, 5625, 0x34, }, + { 76, 6625, 5500, 0x33, }, + { 77, 6500, 5375, 0x32, }, + { 78, 6375, 5250, 0x31, }, + { 79, 6250, 5125, 0x30, }, + { 80, 6125, 5000, 0x2f, }, + { 81, 6000, 4875, 0x2e, }, + { 82, 5875, 4750, 0x2d, }, + { 83, 5750, 4625, 0x2c, }, + { 84, 5625, 4500, 0x2b, }, + { 85, 5500, 4375, 0x2a, }, + { 86, 5375, 4250, 0x29, }, + { 87, 5250, 4125, 0x28, }, + { 88, 5125, 4000, 0x27, }, + { 89, 5000, 3875, 0x26, }, + { 90, 4875, 3750, 0x25, }, + { 91, 4750, 3625, 0x24, }, + { 92, 4625, 3500, 0x23, }, + { 93, 4500, 3375, 0x22, }, + { 94, 4375, 3250, 0x21, }, + { 95, 4250, 3125, 0x20, }, + { 96, 4125, 3000, 0x1f, }, + { 97, 4125, 3000, 0x1e, }, + { 98, 4125, 3000, 0x1d, }, + { 99, 4125, 3000, 0x1c, }, + { 100, 4125, 3000, 0x1b, }, + { 101, 4125, 3000, 0x1a, }, + { 102, 4125, 3000, 0x19, }, + { 103, 4125, 3000, 0x18, }, + { 104, 4125, 3000, 0x17, }, + { 105, 4125, 3000, 0x16, }, + { 106, 4125, 3000, 0x15, }, + { 107, 4125, 3000, 0x14, }, + { 108, 4125, 3000, 0x13, }, + { 109, 4125, 3000, 0x12, }, + { 110, 4125, 3000, 0x11, }, + { 111, 4125, 3000, 0x10, }, + { 112, 4125, 3000, 0x0f, }, + { 113, 4125, 3000, 0x0e, }, + { 114, 4125, 3000, 0x0d, }, + { 115, 4125, 3000, 0x0c, }, + { 116, 4125, 3000, 0x0b, }, + { 117, 4125, 3000, 0x0a, }, + { 118, 4125, 3000, 0x09, }, + { 119, 4125, 3000, 0x08, }, + { 120, 1125, 0, 0x07, }, + { 121, 1000, 0, 0x06, }, + { 122, 875, 0, 0x05, }, + { 123, 750, 0, 0x04, }, + { 124, 625, 0, 0x03, }, + { 125, 500, 0, 0x02, }, + { 126, 375, 0, 0x01, }, + { 127, 0, 0, 0x00, }, +}; + +struct cparams { + int i; + int t; + int m; + int c; +}; + +static struct cparams cparams[] = { + { 1, 1333, 301, 28664 }, + { 1, 1066, 294, 24460 }, + { 1, 800, 294, 25192 }, + { 0, 1333, 276, 27605 }, + { 0, 1066, 276, 27605 }, + { 0, 800, 231, 23784 }, +}; + +unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) +{ + u64 total_count, diff, ret; + u32 count1, count2, count3, m = 0, c = 0; + unsigned long now = jiffies_to_msecs(jiffies), diff1; + int i; + + diff1 = now - dev_priv->last_time1; + + count1 = I915_READ(DMIEC); + count2 = I915_READ(DDREC); + count3 = I915_READ(CSIEC); + + total_count = count1 + count2 + count3; + + /* FIXME: handle per-counter overflow */ + if (total_count < dev_priv->last_count1) { + diff = ~0UL - dev_priv->last_count1; + diff += total_count; + } else { + diff = total_count - dev_priv->last_count1; + } + + for (i = 0; i < ARRAY_SIZE(cparams); i++) { + if (cparams[i].i == dev_priv->c_m && + cparams[i].t == dev_priv->r_t) { + m = cparams[i].m; + c = cparams[i].c; + break; + } + } + + div_u64(diff, diff1); + ret = ((m * diff) + c); + div_u64(ret, 10); + + dev_priv->last_count1 = total_count; + dev_priv->last_time1 = now; + + return ret; +} + +unsigned long i915_mch_val(struct drm_i915_private *dev_priv) +{ + unsigned long m, x, b; + u32 tsfs; + + tsfs = I915_READ(TSFS); + + m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); + x = I915_READ8(TR1); + + b = tsfs & TSFS_INTR_MASK; + + return ((m * x) / 127) - b; +} + +static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) +{ + unsigned long val = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(v_table); i++) { + if (v_table[i].pvid == pxvid) { + if (IS_MOBILE(dev_priv->dev)) + val = v_table[i].vm; + else + val = v_table[i].vd; + } + } + + return val; +} + +void i915_update_gfx_val(struct 
drm_i915_private *dev_priv) +{ + struct timespec now, diff1; + u64 diff; + unsigned long diffms; + u32 count; + + getrawmonotonic(&now); + diff1 = timespec_sub(now, dev_priv->last_time2); + + /* Don't divide by 0 */ + diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; + if (!diffms) + return; + + count = I915_READ(GFXEC); + + if (count < dev_priv->last_count2) { + diff = ~0UL - dev_priv->last_count2; + diff += count; + } else { + diff = count - dev_priv->last_count2; + } + + dev_priv->last_count2 = count; + dev_priv->last_time2 = now; + + /* More magic constants... */ + diff = diff * 1181; + div_u64(diff, diffms * 10); + dev_priv->gfx_power = diff; } +unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) +{ + unsigned long t, corr, state1, corr2, state2; + u32 pxvid, ext_v; + + pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); + pxvid = (pxvid >> 24) & 0x7f; + ext_v = pvid_to_extvid(dev_priv, pxvid); + + state1 = ext_v; + + t = i915_mch_val(dev_priv); + + /* Revel in the empirically derived constants */ + + /* Correction factor in 1/100000 units */ + if (t > 80) + corr = ((t * 2349) + 135940); + else if (t >= 50) + corr = ((t * 964) + 29317); + else /* < 50 */ + corr = ((t * 301) + 1004); + + corr = corr * ((150142 * state1) / 10000 - 78642); + corr /= 100000; + corr2 = (corr * dev_priv->corr); + + state2 = (corr2 * state1) / 10000; + state2 /= 100; /* convert to mW */ + + i915_update_gfx_val(dev_priv); + + return dev_priv->gfx_power + state2; +} + +/* Global for IPS driver to get at the current i915 device */ +static struct drm_i915_private *i915_mch_dev; +/* + * Lock protecting IPS related data structures + * - i915_mch_dev + * - dev_priv->max_delay + * - dev_priv->min_delay + * - dev_priv->fmax + * - dev_priv->gpu_busy + */ +DEFINE_SPINLOCK(mchdev_lock); + +/** + * i915_read_mch_val - return value for IPS use + * + * Calculate and return a value for the IPS driver to use when deciding whether + * we have thermal and power headroom to increase CPU or GPU power budget. + */ +unsigned long i915_read_mch_val(void) +{ + struct drm_i915_private *dev_priv; + unsigned long chipset_val, graphics_val, ret = 0; + + spin_lock(&mchdev_lock); + if (!i915_mch_dev) + goto out_unlock; + dev_priv = i915_mch_dev; + + chipset_val = i915_chipset_val(dev_priv); + graphics_val = i915_gfx_val(dev_priv); + + ret = chipset_val + graphics_val; + +out_unlock: + spin_unlock(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_read_mch_val); + +/** + * i915_gpu_raise - raise GPU frequency limit + * + * Raise the limit; IPS indicates we have thermal headroom. + */ +bool i915_gpu_raise(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + spin_lock(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + if (dev_priv->max_delay > dev_priv->fmax) + dev_priv->max_delay--; + +out_unlock: + spin_unlock(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_raise); + +/** + * i915_gpu_lower - lower GPU frequency limit + * + * IPS indicates we're close to a thermal limit, so throttle back the GPU + * frequency maximum. 
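The raise/lower/busy hooks around this point, together with i915_read_mch_val(), form the whole contract with the separate intel_ips driver: it polls the combined chipset-plus-graphics power estimate and nudges the GPU frequency ceiling against its thermal budget. A hypothetical consumer-side sketch; the budget constant, its units, and the single-shot structure are illustrative only and not part of this patch:

/* Hypothetical IPS-side caller of the hooks exported here. */
#define IPS_POWER_BUDGET	25000	/* illustrative threshold only */

static void ips_adjust_once(void)
{
	unsigned long mch = i915_read_mch_val(); /* chipset + gfx estimate */

	if (mch < IPS_POWER_BUDGET)
		i915_gpu_raise();	/* headroom: permit a higher P-state */
	else
		i915_gpu_lower();	/* over budget: clamp the maximum */
}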
+ */ +bool i915_gpu_lower(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + spin_lock(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + if (dev_priv->max_delay < dev_priv->min_delay) + dev_priv->max_delay++; + +out_unlock: + spin_unlock(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_lower); + +/** + * i915_gpu_busy - indicate GPU business to IPS + * + * Tell the IPS driver whether or not the GPU is busy. + */ +bool i915_gpu_busy(void) +{ + struct drm_i915_private *dev_priv; + bool ret = false; + + spin_lock(&mchdev_lock); + if (!i915_mch_dev) + goto out_unlock; + dev_priv = i915_mch_dev; + + ret = dev_priv->busy; + +out_unlock: + spin_unlock(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_busy); + +/** + * i915_gpu_turbo_disable - disable graphics turbo + * + * Disable graphics turbo by resetting the max frequency and setting the + * current frequency to the default. + */ +bool i915_gpu_turbo_disable(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + spin_lock(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + dev_priv->max_delay = dev_priv->fstart; + + if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) + ret = false; + +out_unlock: + spin_unlock(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1578,11 +2042,10 @@ static void i915_get_mem_freq(struct drm_device *dev) */ int i915_driver_load(struct drm_device *dev, unsigned long flags) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv; resource_size_t base, size; int ret = 0, mmio_bar; uint32_t agp_size, prealloc_size, prealloc_start; - /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -1660,6 +2123,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->has_gem = 0; } + if (dev_priv->has_gem == 0 && + drm_core_check_feature(dev, DRIVER_MODESET)) { + DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); + ret = -ENODEV; + goto out_iomapfree; + } + dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { @@ -1679,7 +2149,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_workqueue_free; } - i915_get_mem_freq(dev); + if (IS_PINEVIEW(dev)) + i915_pineview_get_mem_freq(dev); + else if (IS_IRONLAKE(dev)) + i915_ironlake_get_mem_freq(dev); /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there @@ -1697,7 +2170,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->user_irq_lock); spin_lock_init(&dev_priv->error_lock); - dev_priv->user_irq_refcount = 0; dev_priv->trace_irq_seqno = 0; ret = drm_vblank_init(dev, I915_NUM_PIPE); @@ -1710,6 +2182,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Start out suspended */ dev_priv->mm.suspended = 1; + intel_detect_pch(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = i915_load_modeset_init(dev, prealloc_start, prealloc_size, agp_size); @@ -1724,6 +2198,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) setup_timer(&dev_priv->hangcheck_timer, 
i915_hangcheck_elapsed, (unsigned long) dev); + + spin_lock(&mchdev_lock); + i915_mch_dev = dev_priv; + dev_priv->mchdev_lock = &mchdev_lock; + spin_unlock(&mchdev_lock); + return 0; out_workqueue_free: @@ -1745,6 +2225,10 @@ int i915_driver_unload(struct drm_device *dev) i915_destroy_error_state(dev); + spin_lock(&mchdev_lock); + i915_mch_dev = NULL; + spin_unlock(&mchdev_lock); + destroy_workqueue(dev_priv->wq); del_timer_sync(&dev_priv->hangcheck_timer); @@ -1756,6 +2240,8 @@ int i915_driver_unload(struct drm_device *dev) } if (drm_core_check_feature(dev, DRIVER_MODESET)) { + intel_modeset_cleanup(dev); + /* * free the memory space allocated for the child device * config parsed from VBT @@ -1779,13 +2265,13 @@ int i915_driver_unload(struct drm_device *dev) intel_opregion_free(dev, 0); if (drm_core_check_feature(dev, DRIVER_MODESET)) { - intel_modeset_cleanup(dev); - i915_gem_free_all_phys_object(dev); mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); + if (I915_HAS_FBC(dev) && i915_powersave) + i915_cleanup_compression(dev); drm_mm_takedown(&dev_priv->vram); i915_gem_lastclose(dev); @@ -1881,29 +2367,29 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), - DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), - DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, 
DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1b2e95455c05..423dc90c1e20 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -60,98 +60,99 @@ extern int intel_agp_enabled; .subdevice = PCI_ANY_ID, \ .driver_data = (unsigned long) info } -const static struct intel_device_info intel_i830_info = { +static const struct intel_device_info intel_i830_info = { .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, }; -const static struct intel_device_info intel_845g_info = { +static const struct intel_device_info intel_845g_info = { .is_i8xx = 1, }; -const static struct intel_device_info intel_i85x_info = { - .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, +static const struct intel_device_info intel_i85x_info = { + .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, + .cursor_needs_physical = 1, }; -const static struct intel_device_info intel_i865g_info = { +static const struct intel_device_info intel_i865g_info = { .is_i8xx = 1, }; -const static struct intel_device_info intel_i915g_info = { +static const struct intel_device_info intel_i915g_info = { .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, }; -const static struct intel_device_info intel_i915gm_info = { - .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, +static const struct intel_device_info intel_i915gm_info = { + .is_i9xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, }; -const static struct intel_device_info intel_i945g_info = { +static const struct intel_device_info intel_i945g_info = { .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, }; -const static struct intel_device_info intel_i945gm_info = { - .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, +static const struct intel_device_info intel_i945gm_info = { + .is_i945gm = 
1, .is_i9xx = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, }; -const static struct intel_device_info intel_i965g_info = { +static const struct intel_device_info intel_i965g_info = { .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_i965gm_info = { +static const struct intel_device_info intel_i965gm_info = { .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_g33_info = { +static const struct intel_device_info intel_g33_info = { .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_g45_info = { +static const struct intel_device_info intel_g45_info = { .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_gm45_info = { +static const struct intel_device_info intel_gm45_info = { .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_pineview_info = { +static const struct intel_device_info intel_pineview_info = { .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_ironlake_d_info = { +static const struct intel_device_info intel_ironlake_d_info = { .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_ironlake_m_info = { +static const struct intel_device_info intel_ironlake_m_info = { .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1, }; -const static struct intel_device_info intel_sandybridge_d_info = { +static const struct intel_device_info intel_sandybridge_d_info = { .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, + .has_hotplug = 1, .is_gen6 = 1, }; -const static struct intel_device_info intel_sandybridge_m_info = { +static const struct intel_device_info intel_sandybridge_m_info = { .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, + .has_hotplug = 1, .is_gen6 = 1, }; -const static struct pci_device_id pciidlist[] = { +static const struct pci_device_id pciidlist[] = { INTEL_VGA_DEVICE(0x3577, &intel_i830_info), INTEL_VGA_DEVICE(0x2562, &intel_845g_info), INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), - INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), + INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), @@ -187,6 +188,35 @@ const static struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); #endif +#define INTEL_PCH_DEVICE_ID_MASK 0xff00 +#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 + +void intel_detect_pch (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pch; + + /* + * The reason to probe ISA bridge instead of Dev31:Fun0 is to + * make graphics device passthrough work easy for VMM, that only + * need to expose ISA bridge to let driver know the real hardware + * underneath. This is a requirement from virtualization team. 
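One PCI detail worth noting before the function body below: pci_get_class() returns its match with a reference held (or NULL), so every successful lookup must be balanced with pci_dev_put(), as the new code does. A distilled sketch of that get/put discipline under the same class key:

/* Sketch of the reference pattern intel_detect_pch() follows. */
static bool isa_bridge_is_intel(void)
{
	struct pci_dev *pch;
	bool ret = false;

	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (pch) {
		ret = (pch->vendor == PCI_VENDOR_ID_INTEL);
		pci_dev_put(pch);	/* balance the get */
	}
	return ret;
}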
+ */ + pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); + if (pch) { + if (pch->vendor == PCI_VENDOR_ID_INTEL) { + int id; + id = pch->device & INTEL_PCH_DEVICE_ID_MASK; + + if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_CPT; + DRM_DEBUG_KMS("Found CougarPoint PCH\n"); + } + } + pci_dev_put(pch); + } +} + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -310,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags) /* * Clear request list */ - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev, &dev_priv->render_ring); if (need_display) i915_save_display(dev); @@ -340,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags) } } else { DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); + mutex_unlock(&dev->struct_mutex); return -ENODEV; } @@ -358,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags) * switched away). */ if (drm_core_check_feature(dev, DRIVER_MODESET) || - !dev_priv->mm.suspended) { - drm_i915_ring_buffer_t *ring = &dev_priv->ring; - struct drm_gem_object *obj = ring->ring_obj; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + !dev_priv->mm.suspended) { + struct intel_ring_buffer *ring = &dev_priv->render_ring; dev_priv->mm.suspended = 0; - - /* Stop the ring if it's running. */ - I915_WRITE(PRB0_CTL, 0); - I915_WRITE(PRB0_TAIL, 0); - I915_WRITE(PRB0_HEAD, 0); - - /* Initialize the ring. */ - I915_WRITE(PRB0_START, obj_priv->gtt_offset); - I915_WRITE(PRB0_CTL, - ((obj->size - 4096) & RING_NR_PAGES) | - RING_NO_REPORT | - RING_VALID); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_kernel_lost_context(dev); - else { - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->Size; - } - + ring->init(dev, ring); mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); drm_irq_install(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 979439cfb827..2e1744d37ad5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -32,6 +32,7 @@ #include "i915_reg.h" #include "intel_bios.h" +#include "intel_ringbuffer.h" #include <linux/io-mapping.h> /* General customization: @@ -55,6 +56,8 @@ enum plane { #define I915_NUM_PIPE 2 +#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + /* Interface history: * * 1.1: Original. 
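The new I915_GEM_GPU_DOMAINS mask above defines the GPU domains negatively, as everything outside the CPU and GTT domains. A one-line illustration of the kind of test this enables; the helper name is invented for illustration:

/* Illustrative only: a write domain needs a GPU flush exactly when it
 * falls inside I915_GEM_GPU_DOMAINS. */
static bool needs_gpu_flush(struct drm_gem_object *obj)
{
	return (obj->write_domain & I915_GEM_GPU_DOMAINS) != 0;
}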
@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object { struct drm_gem_object *cur_obj; }; -typedef struct _drm_i915_ring_buffer { - unsigned long Size; - u8 *virtual_start; - int head; - int tail; - int space; - drm_local_map_t map; - struct drm_gem_object *ring_obj; -} drm_i915_ring_buffer_t; - struct mem_block { struct mem_block *next; struct mem_block *prev; @@ -128,6 +121,7 @@ struct drm_i915_master_private { struct drm_i915_fence_reg { struct drm_gem_object *obj; + struct list_head lru_list; }; struct sdvo_device_mapping { @@ -135,6 +129,7 @@ struct sdvo_device_mapping { u8 slave_addr; u8 dvo_wiring; u8 initialized; + u8 ddc_pin; }; struct drm_i915_error_state { @@ -175,7 +170,7 @@ struct drm_i915_error_state { struct drm_i915_display_funcs { void (*dpms)(struct drm_crtc *crtc, int mode); - bool (*fbc_enabled)(struct drm_crtc *crtc); + bool (*fbc_enabled)(struct drm_device *dev); void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); void (*disable_fbc)(struct drm_device *dev); int (*get_display_clock_speed)(struct drm_device *dev); @@ -195,6 +190,7 @@ struct intel_overlay; struct intel_device_info { u8 is_mobile : 1; u8 is_i8xx : 1; + u8 is_i85x : 1; u8 is_i915g : 1; u8 is_i9xx : 1; u8 is_i945gm : 1; @@ -205,6 +201,7 @@ struct intel_device_info { u8 is_g4x : 1; u8 is_pineview : 1; u8 is_ironlake : 1; + u8 is_gen6 : 1; u8 has_fbc : 1; u8 has_rc6 : 1; u8 has_pipe_cxsr : 1; @@ -218,8 +215,18 @@ enum no_fbc_reason { FBC_MODE_TOO_LARGE, /* mode too large for compression */ FBC_BAD_PLANE, /* fbc not supported on plane */ FBC_NOT_TILED, /* buffer not tiled */ + FBC_MULTIPLE_PIPES, /* more than one pipe active */ }; +enum intel_pch { + PCH_IBX, /* Ibexpeak PCH */ + PCH_CPT, /* Cougarpoint PCH */ +}; + +#define QUIRK_PIPEA_FORCE (1<<0) + +struct intel_fbdev; + typedef struct drm_i915_private { struct drm_device *dev; @@ -230,15 +237,16 @@ typedef struct drm_i915_private { void __iomem *regs; struct pci_dev *bridge_dev; - drm_i915_ring_buffer_t ring; + struct intel_ring_buffer render_ring; + struct intel_ring_buffer bsd_ring; drm_dma_handle_t *status_page_dmah; - void *hw_status_page; + void *seqno_page; dma_addr_t dma_status_page; uint32_t counter; - unsigned int status_gfx_addr; + unsigned int seqno_gfx_addr; drm_local_map_t hws_map; - struct drm_gem_object *hws_obj; + struct drm_gem_object *seqno_obj; struct drm_gem_object *pwrctx; struct resource mch_res; @@ -253,8 +261,6 @@ typedef struct drm_i915_private { atomic_t irq_received; /** Protects user_irq_refcount and irq_mask_reg */ spinlock_t user_irq_lock; - /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). 
*/ - int user_irq_refcount; u32 trace_irq_seqno; /** Cached value of IMR to avoid reads in updating the bitfield */ u32 irq_mask_reg; @@ -275,6 +281,7 @@ typedef struct drm_i915_private { struct mem_block *agp_heap; unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; + int num_pipe; /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ @@ -320,7 +327,7 @@ typedef struct drm_i915_private { int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ - unsigned int fsb_freq, mem_freq; + unsigned int fsb_freq, mem_freq, is_ddr3; spinlock_t error_lock; struct drm_i915_error_state *first_error; @@ -330,6 +337,11 @@ typedef struct drm_i915_private { /* Display functions */ struct drm_i915_display_funcs display; + /* PCH chipset type */ + enum intel_pch pch_type; + + unsigned long quirks; + /* Register state */ bool modeset_on_lid; u8 saveLBB; @@ -497,18 +509,7 @@ typedef struct drm_i915_private { */ struct list_head shrink_list; - /** - * List of objects currently involved in rendering from the - * ringbuffer. - * - * Includes buffers having the contents of their GPU caches - * flushed, not necessarily primitives. last_rendering_seqno - * represents when the rendering involved will be completed. - * - * A reference is held on the buffer while on this list. - */ spinlock_t active_list_lock; - struct list_head active_list; /** * List of objects which are not in the ringbuffer but which @@ -546,12 +547,6 @@ typedef struct drm_i915_private { struct list_head fence_list; /** - * List of breadcrumbs associated with GPU requests currently - * outstanding. - */ - struct list_head request_list; - - /** * We leave the user IRQ off as much as possible, * but this means that requests will finish and never * be retired once the system goes idle. Set a timer to @@ -606,10 +601,13 @@ typedef struct drm_i915_private { struct drm_crtc *plane_to_crtc_mapping[2]; struct drm_crtc *pipe_to_crtc_mapping[2]; wait_queue_head_t pending_flip_queue; + bool flip_pending_is_done; /* Reclocking support */ bool render_reclock_avail; bool lvds_downclock_avail; + /* indicate whether the LVDS EDID is OK */ + bool lvds_edid_good; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; struct work_struct idle_work; @@ -625,13 +623,31 @@ typedef struct drm_i915_private { u8 cur_delay; u8 min_delay; u8 max_delay; + u8 fmax; + u8 fstart; + + u64 last_count1; + unsigned long last_time1; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + int c_m; + int r_t; + u8 corr; + spinlock_t *mchdev_lock; enum no_fbc_reason no_fbc_reason; + + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; + + /* list of fbdev register on this device */ + struct intel_fbdev *fbdev; } drm_i915_private_t; /** driver private structure attached to each drm_gem_object */ struct drm_i915_gem_object { - struct drm_gem_object *obj; + struct drm_gem_object base; /** Current space allocated to this object in the GTT, if any. */ struct drm_mm_node *gtt_space; @@ -641,27 +657,69 @@ struct drm_i915_gem_object { /** This object's place on GPU write list */ struct list_head gpu_write_list; - /** This object's place on the fenced object LRU */ - struct list_head fence_list; - /** * This is set if the object is on the active or flushing lists * (has pending rendering), and is not set if it's on inactive (ready * to be unbound). 
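A conversion running through many hunks of this series: struct drm_i915_gem_object now embeds its drm_gem_object as "base" rather than pointing at it, and the to_intel_bo() macro defined a little further down recovers the wrapper with container_of(). A sketch of the two directions of that conversion:

/* Sketch: both directions of the new embedded-base relationship. */
static void show_conversions(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	/* going back is just the embedded member, no pointer chase */
	WARN_ON(&obj_priv->base != obj);
}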
*/ - int active; + unsigned int active : 1; /** * This is set if the object has been written to since last bound * to the GTT */ - int dirty; + unsigned int dirty : 1; + + /** + * Fence register bits (if any) for this object. Will be set + * as needed when mapped into the GTT. + * Protected by dev->struct_mutex. + * + * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) + */ + int fence_reg : 5; + + /** + * Used for checking the object doesn't appear more than once + * in an execbuffer object list. + */ + unsigned int in_execbuffer : 1; + + /** + * Advice: are the backing pages purgeable? + */ + unsigned int madv : 2; + + /** + * Refcount for the pages array. With the current locking scheme, there + * are at most two concurrent users: Binding a bo to the gtt and + * pwrite/pread using physical addresses. So two bits for a maximum + * of two users are enough. + */ + unsigned int pages_refcount : 2; +#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3 + + /** + * Current tiling mode for the object. + */ + unsigned int tiling_mode : 2; + + /** How many users have pinned this object in GTT space. The following + * users can each hold at most one reference: pwrite/pread, pin_ioctl + * (via user_pin_count), execbuffer (objects are not allowed multiple + * times for the same batchbuffer), and the framebuffer code. When + * switching/pageflipping, the framebuffer code has at most two buffers + * pinned per crtc. + * + * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 + * bits with absolutely no headroom. So use 4 bits. */ + int pin_count : 4; +#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf /** AGP memory structure for our GTT binding. */ DRM_AGP_MEM *agp_mem; struct page **pages; - int pages_refcount; /** * Current offset of the object in GTT space. @@ -670,26 +728,18 @@ struct drm_i915_gem_object { */ uint32_t gtt_offset; + /* Which ring is referring to this object */ + struct intel_ring_buffer *ring; + /** * Fake offset for use by mmap(2) */ uint64_t mmap_offset; - /** - * Fence register bits (if any) for this object. Will be set - * as needed when mapped into the GTT. - * Protected by dev->struct_mutex. - */ - int fence_reg; - - /** How many users have pinned this object in GTT space */ - int pin_count; - /** Breadcrumb of last rendering to the buffer. */ uint32_t last_rendering_seqno; - /** Current tiling mode for the object. */ - uint32_t tiling_mode; + /** Current tiling stride for the object, if it's tiled. */ uint32_t stride; /** Record of address bit 17 of each page at last unbind. */ @@ -712,17 +762,6 @@ struct drm_i915_gem_object { struct drm_i915_gem_phys_object *phys_obj; /** - * Used for checking the object doesn't appear more than once - * in an execbuffer object list. - */ - int in_execbuffer; - - /** - * Advice: are the backing pages purgeable? - */ - int madv; - - /** * Number of crtcs where this object is currently the fb, but * will be page flipped away on the next vblank. When it * reaches 0, dev_priv->pending_flip_queue will be woken up. @@ -730,6 +769,8 @@ struct drm_i915_gem_object { atomic_t pending_flip; }; +#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) + /** * Request queue structure. * * @@ -741,6 +782,9 @@ struct drm_i915_gem_object { * an emission time with seqnos for tracking how far ahead of the GPU we are. */ struct drm_i915_gem_request { + /** On which ring this request was generated */ + struct intel_ring_buffer *ring; + /** GEM sequence number associated with this request.
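
The conversion above packs several formerly plain int fields into a few bits each, and the new *_MAX_* defines give callers a bound to check before incrementing; the pin_count comment derives a worst case of 7, hence 4 bits. A small sketch, assuming (as the kernel does) a compiler that treats plain int bitfields as signed so -1 can encode "no fence":

#include <assert.h>
#include <stdio.h>

struct packed_bo {
    unsigned int active    : 1;
    unsigned int dirty     : 1;
    int          fence_reg : 5;  /* signed: -1 means no fence */
    unsigned int pin_count : 4;
};
#define MAX_PIN_COUNT 0xf

static void pin(struct packed_bo *bo)
{
    /* Guard against silent wraparound of the 4-bit counter. */
    assert(bo->pin_count < MAX_PIN_COUNT);
    bo->pin_count++;
}

int main(void)
{
    struct packed_bo bo = { .fence_reg = -1 };

    pin(&bo);
    printf("pin_count=%u fence_reg=%d sizeof=%zu\n",
           (unsigned)bo.pin_count, bo.fence_reg, sizeof(bo));
    return 0;
}
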
*/ uint32_t seqno; @@ -797,6 +841,11 @@ extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4); extern int i965_reset(struct drm_device *dev, u8 flags); +extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); +extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); +extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); +extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); + /* i915_irq.c */ void i915_hangcheck_elapsed(unsigned long data); @@ -805,9 +854,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); -void i915_user_irq_get(struct drm_device *dev); void i915_trace_irq_get(struct drm_device *dev, u32 seqno); -void i915_user_irq_put(struct drm_device *dev); extern void i915_enable_interrupt (struct drm_device *dev); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); @@ -825,6 +872,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); extern int i915_vblank_swap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); +extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); +extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, + u32 mask); +extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, + u32 mask); void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); @@ -890,17 +942,21 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); +struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, + size_t size); void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); -uint32_t i915_get_gem_seqno(struct drm_device *dev); +uint32_t i915_get_gem_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring); bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); -void i915_gem_retire_requests(struct drm_device *dev); +void i915_gem_retire_requests(struct drm_device *dev, + struct intel_ring_buffer *ring); void i915_gem_retire_work_handler(struct work_struct *work); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, @@ -911,9 +967,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end); int i915_gem_idle(struct drm_device *dev); -uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - uint32_t flush_domains); -int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); +uint32_t i915_add_request(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t flush_domains, + struct intel_ring_buffer *ring); +int i915_do_wait_request(struct drm_device *dev, + uint32_t seqno, int interruptible, + struct intel_ring_buffer 
*ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); @@ -986,6 +1046,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void i8xx_disable_fbc(struct drm_device *dev); extern void g4x_disable_fbc(struct drm_device *dev); +extern void intel_disable_fbc(struct drm_device *dev); +extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); +extern bool intel_fbc_enabled(struct drm_device *dev); +extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern void intel_detect_pch (struct drm_device *dev); +extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); /** * Lock test for when it's just for synchronization of ring access. @@ -994,7 +1060,8 @@ extern void g4x_disable_fbc(struct drm_device *dev); * has access to the ring. */ #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ - if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ + if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ + == NULL) \ LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) @@ -1007,35 +1074,31 @@ extern void g4x_disable_fbc(struct drm_device *dev); #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) #define I915_READ64(reg) readq(dev_priv->regs + (reg)) #define POSTING_READ(reg) (void)I915_READ(reg) +#define POSTING_READ16(reg) (void)I915_READ16(reg) #define I915_VERBOSE 0 -#define RING_LOCALS volatile unsigned int *ring_virt__; - -#define BEGIN_LP_RING(n) do { \ - int bytes__ = 4*(n); \ - if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ - /* a wrap must occur between instructions so pad beforehand */ \ - if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \ - i915_wrap_ring(dev); \ - if (unlikely (dev_priv->ring.space < bytes__)) \ - i915_wait_ring(dev, bytes__, __func__); \ - ring_virt__ = (unsigned int *) \ - (dev_priv->ring.virtual_start + dev_priv->ring.tail); \ - dev_priv->ring.tail += bytes__; \ - dev_priv->ring.tail &= dev_priv->ring.Size - 1; \ - dev_priv->ring.space -= bytes__; \ +#define BEGIN_LP_RING(n) do { \ + drm_i915_private_t *dev_priv = dev->dev_private; \ + if (I915_VERBOSE) \ + DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ + intel_ring_begin(dev, &dev_priv->render_ring, (n)); \ } while (0) -#define OUT_RING(n) do { \ - if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *ring_virt__++ = (n); \ + +#define OUT_RING(x) do { \ + drm_i915_private_t *dev_priv = dev->dev_private; \ + if (I915_VERBOSE) \ + DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ + intel_ring_emit(dev, &dev_priv->render_ring, x); \ } while (0) #define ADVANCE_LP_RING() do { \ + drm_i915_private_t *dev_priv = dev->dev_private; \ if (I915_VERBOSE) \ - DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \ - I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \ + DRM_DEBUG("ADVANCE_LP_RING %x\n", \ + dev_priv->render_ring.tail); \ + intel_ring_advance(dev, &dev_priv->render_ring); \ } while(0) /** @@ -1053,19 +1116,17 @@ extern void g4x_disable_fbc(struct drm_device *dev); * * The area from dword 0x20 to 0x3ff is available for driver usage. 
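
With the rewrite above, BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING become thin wrappers over intel_ring_begin/intel_ring_emit/intel_ring_advance, so the same emission macros can drive the render ring or any other ring. A simplified userspace model of such a ring buffer (power-of-two size, commands padded out rather than wrapped mid-sequence); waiting on the hardware head pointer is deliberately elided:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 64  /* power of two, in bytes */

struct ring {
    uint8_t  buf[RING_SIZE];
    uint32_t tail;   /* next write offset */
    uint32_t space;  /* bytes known to be free */
};

static void ring_begin(struct ring *r, int dwords)
{
    uint32_t bytes = 4 * dwords;

    /* A command may not straddle the end of the buffer, so pad to
     * the start with NOOPs before emitting it. */
    if (r->tail + bytes > RING_SIZE) {
        memset(r->buf + r->tail, 0, RING_SIZE - r->tail);
        r->space -= RING_SIZE - r->tail;
        r->tail = 0;
    }
    /* A real driver would wait for the GPU head if space < bytes. */
}

static void ring_emit(struct ring *r, uint32_t dword)
{
    memcpy(r->buf + r->tail, &dword, 4);
    r->tail = (r->tail + 4) & (RING_SIZE - 1);
    r->space -= 4;
}

static void ring_advance(struct ring *r)
{
    /* Doorbell: tell the hardware where the tail now is. */
    printf("write tail register = %u\n", r->tail);
}

int main(void)
{
    struct ring r = { .space = RING_SIZE };

    ring_begin(&r, 2);
    ring_emit(&r, 0x02000000);  /* placeholder flush opcode */
    ring_emit(&r, 0);           /* noop padding dword */
    ring_advance(&r);
    return 0;
}
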
*/ -#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) +#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ + (dev_priv->render_ring.status_page.page_addr))[reg]) #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) #define I915_GEM_HWS_INDEX 0x20 #define I915_BREADCRUMB_INDEX 0x21 -extern int i915_wrap_ring(struct drm_device * dev); -extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); - #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) -#define IS_I85X(dev) ((dev)->pci_device == 0x3582) +#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) @@ -1084,6 +1145,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) +#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_GEN3(dev) (IS_I915G(dev) || \ @@ -1105,10 +1167,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2A42 || \ (dev)->pci_device == 0x2E42) +#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) -#define IS_GEN6(dev) ((dev)->pci_device == 0x0102) - /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
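
READ_HWSP above switches from one device-wide hardware status page to the render ring's own status_page. The page is ordinary CPU-visible memory that the GPU stores seqnos into asynchronously, which is why the read goes through a volatile pointer; a tiny standalone illustration:

#include <stdint.h>

#define HWS_INDEX 0x20  /* dword slot the GPU writes the latest seqno to */

/* volatile forces a fresh load on every call, since the GPU updates
 * the page behind the compiler's back. */
static uint32_t read_hwsp(const void *page_addr, unsigned int reg)
{
    return ((const volatile uint32_t *)page_addr)[reg];
}

int main(void)
{
    static uint32_t page[1024 / 4];

    page[HWS_INDEX] = 42;  /* stand-in for a GPU store */
    return read_hwsp(page, HWS_INDEX) == 42 ? 0 : 1;
}
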
*/ @@ -1119,7 +1180,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ - !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) + !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ + !IS_GEN6(dev)) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) /* dsparb controlled by hw only */ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) @@ -1131,6 +1193,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ IS_GEN6(dev)) +#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) + +#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fba37e9f775d..5aa747fc25a9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -31,11 +31,10 @@ #include "i915_drv.h" #include "i915_trace.h" #include "intel_drv.h" +#include <linux/slab.h> #include <linux/swap.h> #include <linux/pci.h> -#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) - static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); @@ -123,7 +122,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, args->size = roundup(args->size, PAGE_SIZE); /* Allocate the new object */ - obj = drm_gem_object_alloc(dev, args->size); + obj = i915_gem_alloc_object(dev, args->size); if (obj == NULL) return -ENOMEM; @@ -162,13 +161,13 @@ fast_shmem_read(struct page **pages, static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) { drm_i915_private_t *dev_priv = obj->dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && obj_priv->tiling_mode != I915_TILING_NONE; } -static inline int +static inline void slow_shmem_copy(struct page *dst_page, int dst_offset, struct page *src_page, @@ -177,25 +176,16 @@ slow_shmem_copy(struct page *dst_page, { char *dst_vaddr, *src_vaddr; - dst_vaddr = kmap_atomic(dst_page, KM_USER0); - if (dst_vaddr == NULL) - return -ENOMEM; - - src_vaddr = kmap_atomic(src_page, KM_USER1); - if (src_vaddr == NULL) { - kunmap_atomic(dst_vaddr, KM_USER0); - return -ENOMEM; - } + dst_vaddr = kmap(dst_page); + src_vaddr = kmap(src_page); memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); - kunmap_atomic(src_vaddr, KM_USER1); - kunmap_atomic(dst_vaddr, KM_USER0); - - return 0; + kunmap(src_page); + kunmap(dst_page); } -static inline int +static inline void slow_shmem_bit17_copy(struct page *gpu_page, int gpu_offset, struct page *cpu_page, @@ -215,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, cpu_page, cpu_offset, length); } - gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); - if (gpu_vaddr == NULL) - return -ENOMEM; - - cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); - if (cpu_vaddr == NULL) { - kunmap_atomic(gpu_vaddr, KM_USER0); - return -ENOMEM; - } + gpu_vaddr = kmap(gpu_page); + cpu_vaddr = kmap(cpu_page); 
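
The kmap_atomic-to-kmap conversion above allows slow_shmem_copy and slow_shmem_bit17_copy to sleep, and the bit-17 variant, whose copy loop continues just below, works in cacheline-sized pieces so the swizzled source offset can be recomputed per chunk. The clamping loop on its own, as a hedged sketch (the actual bit-6-XOR-bit-17 fixup is only indicated in the comment):

#include <stddef.h>
#include <string.h>

#define CHUNK 64  /* cacheline granularity used by bit-17 swizzling */

/* Copy length bytes without letting any one memcpy cross a CHUNK
 * boundary of the source; per chunk, a swizzling caller could first
 * XOR bit 6 of the source offset with bit 17 of the page address. */
static void copy_in_chunks(char *dst, const char *src, size_t src_off,
                           size_t length)
{
    while (length > 0) {
        size_t this_len = CHUNK - (src_off % CHUNK);

        if (this_len > length)
            this_len = length;
        memcpy(dst, src + src_off, this_len);
        dst     += this_len;
        src_off += this_len;
        length  -= this_len;
    }
}

int main(void)
{
    char src[256], dst[200];

    memset(src, 'x', sizeof(src));
    copy_in_chunks(dst, src, 30, sizeof(dst));
    return dst[0] == 'x' ? 0 : 1;
}
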
/* Copy the data, XORing A6 with A17 (1). The user already knows he's * XORing with the other bits (A9 for Y, A9 and A10 for X) @@ -247,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, length -= this_length; } - kunmap_atomic(cpu_vaddr, KM_USER1); - kunmap_atomic(gpu_vaddr, KM_USER0); - - return 0; + kunmap(cpu_page); + kunmap(gpu_page); } /** @@ -263,7 +244,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pread *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ssize_t remain; loff_t offset, page_base; char __user *user_data; @@ -284,7 +265,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, if (ret != 0) goto fail_put_pages; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -353,7 +334,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pread *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; @@ -402,7 +383,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, if (ret != 0) goto fail_put_pages; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -426,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, page_length = PAGE_SIZE - data_page_offset; if (do_bit17_swizzling) { - ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], - shmem_page_offset, - user_pages[data_page_index], - data_page_offset, - page_length, - 1); - } else { - ret = slow_shmem_copy(user_pages[data_page_index], - data_page_offset, - obj_priv->pages[shmem_page_index], + slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], shmem_page_offset, - page_length); + user_pages[data_page_index], + data_page_offset, + page_length, + 1); + } else { + slow_shmem_copy(user_pages[data_page_index], + data_page_offset, + obj_priv->pages[shmem_page_index], + shmem_page_offset, + page_length); } - if (ret) - goto fail_put_pages; remain -= page_length; data_ptr += page_length; @@ -478,7 +457,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -EBADF; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); /* Bounds check source. 
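
The pread/pwrite ioctls above bounds-check the user-supplied (offset, size) pair before doing any work. A check of this kind has to avoid forming offset + size, which can wrap; one overflow-safe shape, as a standalone sketch:

#include <errno.h>
#include <stdint.h>

/* Validate [offset, offset + size) against an object of obj_size
 * bytes without ever computing offset + size. */
static int bounds_check(uint64_t offset, uint64_t size, uint64_t obj_size)
{
    if (offset > obj_size)
        return -EINVAL;
    if (size > obj_size - offset)
        return -EINVAL;
    return 0;
}

int main(void)
{
    /* offset + size would wrap to a tiny value; still rejected */
    return bounds_check(~0ull - 3, 8, 4096) == -EINVAL ? 0 : 1;
}
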
* @@ -530,25 +509,24 @@ fast_user_write(struct io_mapping *mapping, * page faults */ -static inline int +static inline void slow_kernel_write(struct io_mapping *mapping, loff_t gtt_base, int gtt_offset, struct page *user_page, int user_offset, int length) { - char *src_vaddr, *dst_vaddr; - unsigned long unwritten; + char __iomem *dst_vaddr; + char *src_vaddr; - dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); - src_vaddr = kmap_atomic(user_page, KM_USER1); - unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, - src_vaddr + user_offset, - length); - kunmap_atomic(src_vaddr, KM_USER1); - io_mapping_unmap_atomic(dst_vaddr); - if (unwritten) - return -EFAULT; - return 0; + dst_vaddr = io_mapping_map_wc(mapping, gtt_base); + src_vaddr = kmap(user_page); + + memcpy_toio(dst_vaddr + gtt_offset, + src_vaddr + user_offset, + length); + + kunmap(user_page); + io_mapping_unmap(dst_vaddr); } static inline int @@ -580,7 +558,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t offset, page_base; @@ -604,7 +582,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if (ret) goto fail; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; while (remain > 0) { @@ -654,7 +632,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t gtt_page_base, offset; @@ -698,7 +676,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if (ret) goto out_unpin_object; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; while (remain > 0) { @@ -721,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; - ret = slow_kernel_write(dev_priv->mm.gtt_mapping, - gtt_page_base, gtt_page_offset, - user_pages[data_page_index], - data_page_offset, - page_length); - - /* If we get a fault while copying data, then (presumably) our - * source page isn't available. Return the error and we'll - * retry in the slow path. 
- */ - if (ret) - goto out_unpin_object; + slow_kernel_write(dev_priv->mm.gtt_mapping, + gtt_page_base, gtt_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); remain -= page_length; offset += page_length; @@ -760,7 +731,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ssize_t remain; loff_t offset, page_base; char __user *user_data; @@ -780,7 +751,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if (ret != 0) goto fail_put_pages; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = args->offset; obj_priv->dirty = 1; @@ -828,7 +799,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; @@ -876,7 +847,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if (ret != 0) goto fail_put_pages; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = args->offset; obj_priv->dirty = 1; @@ -901,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, page_length = PAGE_SIZE - data_page_offset; if (do_bit17_swizzling) { - ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], - shmem_page_offset, - user_pages[data_page_index], - data_page_offset, - page_length, - 0); - } else { - ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], + slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], shmem_page_offset, user_pages[data_page_index], data_page_offset, - page_length); + page_length, + 0); + } else { + slow_shmem_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); } - if (ret) - goto fail_put_pages; remain -= page_length; data_ptr += page_length; @@ -951,7 +920,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -EBADF; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); /* Bounds check destination. * @@ -972,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (obj_priv->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); else if (obj_priv->tiling_mode == I915_TILING_NONE && - dev->gtt_total != 0) { + dev->gtt_total != 0 && + obj->write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); if (ret == -EFAULT) { ret = i915_gem_gtt_pwrite_slow(dev, obj, args, @@ -1033,7 +1003,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -EBADF; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); mutex_lock(&dev->struct_mutex); @@ -1050,7 +1020,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, * about to occur. 
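
i915_gem_pwrite_ioctl above keeps GEM's two-tier copy structure: a fast path that runs with no faulting allowed, and a slow path, tried only when the fast one returns -EFAULT, that pins the user pages first and may sleep. The control flow in isolation, with toy stand-ins for both paths:

#include <errno.h>
#include <stdio.h>

static int resident;  /* pretend the user page faults on first touch */

/* Fast path: pagefaults disabled, so a non-resident user page shows
 * up as -EFAULT rather than a sleeping fault. */
static int write_fast(unsigned long len)
{
    (void)len;
    return resident ? 0 : -EFAULT;
}

/* Slow path: allowed to sleep; pins the user pages up front. */
static int write_slow(unsigned long len)
{
    (void)len;
    resident = 1;
    return 0;
}

static int do_write(unsigned long len)
{
    int ret = write_fast(len);

    if (ret == -EFAULT)
        ret = write_slow(len);
    return ret;
}

int main(void)
{
    printf("ret = %d\n", do_write(4096));
    return 0;
}
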
*/ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { - list_move_tail(&obj_priv->fence_list, + struct drm_i915_fence_reg *reg = + &dev_priv->fence_regs[obj_priv->fence_reg]; + list_move_tail(®->lru_list, &dev_priv->mm.fence_list); } @@ -1095,7 +1067,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, DRM_INFO("%s: sw_finish %d (%p %zd)\n", __func__, args->handle, obj, obj->size); #endif - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); /* Pinned buffers may be scanout, so flush the cache */ if (obj_priv->pin_count) @@ -1166,7 +1138,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct drm_gem_object *obj = vma->vm_private_data; struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); pgoff_t page_offset; unsigned long pfn; int ret = 0; @@ -1233,7 +1205,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_map_list *list; struct drm_local_map *map; int ret = 0; @@ -1304,7 +1276,7 @@ void i915_gem_release_mmap(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (dev->dev_mapping) unmap_mapping_range(dev->dev_mapping, @@ -1315,7 +1287,7 @@ static void i915_gem_free_mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_gem_mm *mm = dev->mm_private; struct drm_map_list *list; @@ -1346,7 +1318,7 @@ static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int start, i; /* @@ -1405,7 +1377,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); @@ -1449,7 +1421,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, void i915_gem_object_put_pages(struct drm_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count = obj->size / PAGE_SIZE; int i; @@ -1466,9 +1438,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) obj_priv->dirty = 0; for (i = 0; i < page_count; i++) { - if (obj_priv->pages[i] == NULL) - break; - if (obj_priv->dirty) set_page_dirty(obj_priv->pages[i]); @@ -1484,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) } static void -i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) +i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, + struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + BUG_ON(ring == NULL); + obj_priv->ring = ring; /* Add a reference if 
we're newly entering the active list. */ if (!obj_priv->active) { @@ -1497,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) } /* Move from whatever list we were on to the tail of execution. */ spin_lock(&dev_priv->mm.active_list_lock); - list_move_tail(&obj_priv->list, - &dev_priv->mm.active_list); + list_move_tail(&obj_priv->list, &ring->active_list); spin_unlock(&dev_priv->mm.active_list_lock); obj_priv->last_rendering_seqno = seqno; } @@ -1508,7 +1479,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); BUG_ON(!obj_priv->active); list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); @@ -1519,7 +1490,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) static void i915_gem_object_truncate(struct drm_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct inode *inode; inode = obj->filp->f_path.dentry->d_inode; @@ -1540,7 +1511,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); i915_verify_inactive(dev, __FILE__, __LINE__); if (obj_priv->pin_count != 0) @@ -1551,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) BUG_ON(!list_empty(&obj_priv->gpu_write_list)); obj_priv->last_rendering_seqno = 0; + obj_priv->ring = NULL; if (obj_priv->active) { obj_priv->active = 0; drm_gem_object_unreference(obj); @@ -1560,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) static void i915_gem_process_flushing_list(struct drm_device *dev, - uint32_t flush_domains, uint32_t seqno) + uint32_t flush_domains, uint32_t seqno, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv, *next; @@ -1568,20 +1541,24 @@ i915_gem_process_flushing_list(struct drm_device *dev, list_for_each_entry_safe(obj_priv, next, &dev_priv->mm.gpu_write_list, gpu_write_list) { - struct drm_gem_object *obj = obj_priv->obj; + struct drm_gem_object *obj = &obj_priv->base; if ((obj->write_domain & flush_domains) == - obj->write_domain) { + obj->write_domain && + obj_priv->ring->ring_flag == ring->ring_flag) { uint32_t old_write_domain = obj->write_domain; obj->write_domain = 0; list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_active(obj, seqno); + i915_gem_object_move_to_active(obj, seqno, ring); /* update the fence lru list */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - list_move_tail(&obj_priv->fence_list, + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_fence_reg *reg = + &dev_priv->fence_regs[obj_priv->fence_reg]; + list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + } trace_i915_gem_object_change_domain(obj, obj->read_domains, @@ -1590,24 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev, } } -/** - * Creates a new sequence number, emitting a write of it to the status page - * plus an interrupt, which will trigger i915_user_interrupt_handler. - * - * Must be called with struct_lock held. - * - * Returned sequence numbers are nonzero on success. 
- */ uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - uint32_t flush_domains) + uint32_t flush_domains, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_file_private *i915_file_priv = NULL; struct drm_i915_gem_request *request; uint32_t seqno; int was_empty; - RING_LOCALS; if (file_priv != NULL) i915_file_priv = file_priv->driver_priv; @@ -1616,28 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, if (request == NULL) return 0; - /* Grab the seqno we're going to make this request be, and bump the - * next (skipping 0 so it can be the reserved no-seqno value). - */ - seqno = dev_priv->mm.next_gem_seqno; - dev_priv->mm.next_gem_seqno++; - if (dev_priv->mm.next_gem_seqno == 0) - dev_priv->mm.next_gem_seqno++; - - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(seqno); - - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); - - DRM_DEBUG_DRIVER("%d\n", seqno); + seqno = ring->add_request(dev, ring, file_priv, flush_domains); request->seqno = seqno; + request->ring = ring; request->emitted_jiffies = jiffies; - was_empty = list_empty(&dev_priv->mm.request_list); - list_add_tail(&request->list, &dev_priv->mm.request_list); + was_empty = list_empty(&ring->request_list); + list_add_tail(&request->list, &ring->request_list); + if (i915_file_priv) { list_add_tail(&request->client_list, &i915_file_priv->mm.request_list); @@ -1649,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, * domain we're flushing with our flush. */ if (flush_domains != 0) - i915_gem_process_flushing_list(dev, flush_domains, seqno); + i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); if (!dev_priv->mm.suspended) { mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); @@ -1666,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, * before signalling the CPU */ static uint32_t -i915_retire_commands(struct drm_device *dev) +i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; uint32_t flush_domains = 0; - RING_LOCALS; /* The sampler always gets flushed on i965 (sigh) */ if (IS_I965G(dev)) flush_domains |= I915_GEM_DOMAIN_SAMPLER; - BEGIN_LP_RING(2); - OUT_RING(cmd); - OUT_RING(0); /* noop */ - ADVANCE_LP_RING(); + + ring->flush(dev, ring, + I915_GEM_DOMAIN_COMMAND, flush_domains); return flush_domains; } @@ -1699,14 +1649,14 @@ i915_gem_retire_request(struct drm_device *dev, * by the ringbuffer to the flushing/inactive lists as appropriate. 
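
The block deleted above shows the seqno discipline that moves into ring->add_request(): sequence numbers are 32-bit and monotonically bumped, zero stays reserved as the no-seqno/failure value, and "has A passed B" must keep working across wraparound, which i915_seqno_passed achieves with a signed subtraction. Both rules in a compact standalone model:

#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = 1;

/* Allocate the next seqno, skipping 0 so it can remain the reserved
 * "no seqno" / failure value. */
static uint32_t alloc_seqno(void)
{
    uint32_t s = next_seqno++;

    if (next_seqno == 0)
        next_seqno++;
    return s;
}

/* True if seq1 is at or after seq2; correct across u32 wraparound as
 * long as the two stay within 2^31 of each other. */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
    printf("%d\n", seqno_passed(2, 0xfffffffeu));  /* 1: wrapped, passed */
    printf("%d\n", seqno_passed(5, 9));            /* 0: not yet */
    printf("first seqno = %u\n", (unsigned)alloc_seqno());
    return 0;
}
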
*/ spin_lock(&dev_priv->mm.active_list_lock); - while (!list_empty(&dev_priv->mm.active_list)) { + while (!list_empty(&request->ring->active_list)) { struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - obj_priv = list_first_entry(&dev_priv->mm.active_list, + obj_priv = list_first_entry(&request->ring->active_list, struct drm_i915_gem_object, list); - obj = obj_priv->obj; + obj = &obj_priv->base; /* If the seqno being retired doesn't match the oldest in the * list, then the oldest in the list must still be newer than @@ -1750,32 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) } uint32_t -i915_get_gem_seqno(struct drm_device *dev) +i915_get_gem_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - - return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); + return ring->get_gem_seqno(dev, ring); } /** * This function clears the request list as sequence numbers are passed. */ void -i915_gem_retire_requests(struct drm_device *dev) +i915_gem_retire_requests(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; - if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) + if (!ring->status_page.page_addr + || list_empty(&ring->request_list)) return; - seqno = i915_get_gem_seqno(dev); + seqno = i915_get_gem_seqno(dev, ring); - while (!list_empty(&dev_priv->mm.request_list)) { + while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; uint32_t retiring_seqno; - request = list_first_entry(&dev_priv->mm.request_list, + request = list_first_entry(&ring->request_list, struct drm_i915_gem_request, list); retiring_seqno = request->seqno; @@ -1793,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev) if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - i915_user_irq_put(dev); + + ring->user_irq_put(dev, ring); dev_priv->trace_irq_seqno = 0; } } @@ -1809,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work) dev = dev_priv->dev; mutex_lock(&dev->struct_mutex); - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev, &dev_priv->render_ring); + + if (HAS_BSD(dev)) + i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + if (!dev_priv->mm.suspended && - !list_empty(&dev_priv->mm.request_list)) + (!list_empty(&dev_priv->render_ring.request_list) || + (HAS_BSD(dev) && + !list_empty(&dev_priv->bsd_ring.request_list)))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } int -i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) +i915_do_wait_request(struct drm_device *dev, uint32_t seqno, + int interruptible, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; u32 ier; @@ -1828,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) if (atomic_read(&dev_priv->mm.wedged)) return -EIO; - if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { + if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else @@ -1842,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) trace_i915_gem_request_wait_begin(dev, seqno); - dev_priv->mm.waiting_gem_seqno = seqno; - i915_user_irq_get(dev); + ring->waiting_gem_seqno = seqno; + ring->user_irq_get(dev, ring); if (interruptible) - ret = 
wait_event_interruptible(dev_priv->irq_queue, - i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || - atomic_read(&dev_priv->mm.wedged)); + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed( + ring->get_gem_seqno(dev, ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); else - wait_event(dev_priv->irq_queue, - i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || - atomic_read(&dev_priv->mm.wedged)); + wait_event(ring->irq_queue, + i915_seqno_passed( + ring->get_gem_seqno(dev, ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); - i915_user_irq_put(dev); - dev_priv->mm.waiting_gem_seqno = 0; + ring->user_irq_put(dev, ring); + ring->waiting_gem_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); } @@ -1863,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) if (ret && ret != -ERESTARTSYS) DRM_ERROR("%s returns %d (awaiting %d at %d)\n", - __func__, ret, seqno, i915_get_gem_seqno(dev)); + __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); /* Directly dispatch request retiring. While we have the work queue * to handle this, the waiter on a request often wants an associated @@ -1871,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) * a separate wait queue to handle that. */ if (ret == 0) - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev, ring); return ret; } @@ -1881,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) * request and object lists appropriately for that event. */ static int -i915_wait_request(struct drm_device *dev, uint32_t seqno) +i915_wait_request(struct drm_device *dev, uint32_t seqno, + struct intel_ring_buffer *ring) { - return i915_do_wait_request(dev, seqno, 1); + return i915_do_wait_request(dev, seqno, 1, ring); } static void @@ -1892,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev, uint32_t flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t cmd; - RING_LOCALS; - -#if WATCH_EXEC - DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, - invalidate_domains, flush_domains); -#endif - trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, - invalidate_domains, flush_domains); - if (flush_domains & I915_GEM_DOMAIN_CPU) drm_agp_chipset_flush(dev); + dev_priv->render_ring.flush(dev, &dev_priv->render_ring, + invalidate_domains, + flush_domains); + + if (HAS_BSD(dev)) + dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, + invalidate_domains, + flush_domains); +} - if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { - /* - * read/write caches: - * - * I915_GEM_DOMAIN_RENDER is always invalidated, but is - * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is - * also flushed at 2d versus 3d pipeline switches. - * - * read-only caches: - * - * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if - * MI_READ_FLUSH is set, and is always flushed on 965. - * - * I915_GEM_DOMAIN_COMMAND may not exist? - * - * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is - * invalidated when MI_EXE_FLUSH is set. - * - * I915_GEM_DOMAIN_VERTEX, which exists on 965, is - * invalidated with every MI_FLUSH. - * - * TLBs: - * - * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND - * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and - * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER - * are flushed at any MI_FLUSH. 
- */ - - cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; - if ((invalidate_domains|flush_domains) & - I915_GEM_DOMAIN_RENDER) - cmd &= ~MI_NO_WRITE_FLUSH; - if (!IS_I965G(dev)) { - /* - * On the 965, the sampler cache always gets flushed - * and this bit is reserved. - */ - if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) - cmd |= MI_READ_FLUSH; - } - if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) - cmd |= MI_EXE_FLUSH; - -#if WATCH_EXEC - DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); -#endif - BEGIN_LP_RING(2); - OUT_RING(cmd); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - } +static void +i915_gem_flush_ring(struct drm_device *dev, + uint32_t invalidate_domains, + uint32_t flush_domains, + struct intel_ring_buffer *ring) +{ + if (flush_domains & I915_GEM_DOMAIN_CPU) + drm_agp_chipset_flush(dev); + ring->flush(dev, ring, + invalidate_domains, + flush_domains); } /** @@ -1967,7 +1887,7 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; /* This function only exists to support waiting for existing rendering, @@ -1983,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) DRM_INFO("%s: object %p wait for seqno %08x\n", __func__, obj, obj_priv->last_rendering_seqno); #endif - ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); + ret = i915_wait_request(dev, + obj_priv->last_rendering_seqno, obj_priv->ring); if (ret != 0) return ret; } @@ -1999,7 +1920,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; #if WATCH_BUF @@ -2077,7 +1998,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size) /* Try to find the smallest clean object */ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - struct drm_gem_object *obj = obj_priv->obj; + struct drm_gem_object *obj = &obj_priv->base; if (obj->size >= min_size) { if ((!obj_priv->dirty || i915_gem_object_is_purgeable(obj_priv)) && @@ -2099,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; - uint32_t seqno; + uint32_t seqno1, seqno2; + int ret; spin_lock(&dev_priv->mm.active_list_lock); - lists_empty = list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list); + lists_empty = (list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->render_ring.active_list) && + (!HAS_BSD(dev) || + list_empty(&dev_priv->bsd_ring.active_list))); spin_unlock(&dev_priv->mm.active_list_lock); if (lists_empty) @@ -2111,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev) /* Flush everything onto the inactive list. 
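
The comment and command-building code removed above encoded how GEM domain bits translate into an MI_FLUSH dword; that knowledge now lives behind each ring's flush hook. Roughly the mapping the deleted render-ring code performed, as a sketch with placeholder register values (the real encodings live in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings, for illustration only. */
#define MI_FLUSH           (0x04 << 23)
#define MI_READ_FLUSH      (1 << 0)
#define MI_EXE_FLUSH       (1 << 1)
#define MI_NO_WRITE_FLUSH  (1 << 2)

#define DOMAIN_RENDER      0x2
#define DOMAIN_SAMPLER     0x4
#define DOMAIN_INSTRUCTION 0x10

/* Pre-965 render ring: write-flush unless the render cache is a
 * target, plus optional read and instruction invalidates. */
static uint32_t build_flush(uint32_t invalidate, uint32_t flush)
{
    uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;

    if ((invalidate | flush) & DOMAIN_RENDER)
        cmd &= ~MI_NO_WRITE_FLUSH;
    if (invalidate & DOMAIN_SAMPLER)
        cmd |= MI_READ_FLUSH;
    if (invalidate & DOMAIN_INSTRUCTION)
        cmd |= MI_EXE_FLUSH;
    return cmd;
}

int main(void)
{
    printf("cmd = %#x\n",
           (unsigned)build_flush(DOMAIN_INSTRUCTION, DOMAIN_RENDER));
    return 0;
}
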
*/ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); - if (seqno == 0) + seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, + &dev_priv->render_ring); + if (seqno1 == 0) return -ENOMEM; + ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); + + if (HAS_BSD(dev)) { + seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, + &dev_priv->bsd_ring); + if (seqno2 == 0) + return -ENOMEM; + + ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); + if (ret) + return ret; + } - return i915_wait_request(dev, seqno); + + return ret; } static int @@ -2128,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev) spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list)); + list_empty(&dev_priv->render_ring.active_list) && + (!HAS_BSD(dev) + || list_empty(&dev_priv->bsd_ring.active_list))); spin_unlock(&dev_priv->mm.active_list_lock); if (lists_empty) @@ -2148,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev) spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list)); + list_empty(&dev_priv->render_ring.active_list) && + (!HAS_BSD(dev) + || list_empty(&dev_priv->bsd_ring.active_list))); spin_unlock(&dev_priv->mm.active_list_lock); BUG_ON(!lists_empty); @@ -2162,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) struct drm_gem_object *obj; int ret; + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; for (;;) { - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev, render_ring); + + if (HAS_BSD(dev)) + i915_gem_retire_requests(dev, bsd_ring); /* If there's an inactive buffer available now, grab it * and be done. @@ -2175,7 +2122,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) #if WATCH_LRU DRM_INFO("%s: evicting %p\n", __func__, obj); #endif - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); BUG_ON(obj_priv->pin_count != 0); BUG_ON(obj_priv->active); @@ -2187,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) * things, wait for the next to finish and hopefully leave us * a buffer to evict. 
*/ - if (!list_empty(&dev_priv->mm.request_list)) { + if (!list_empty(&render_ring->request_list)) { struct drm_i915_gem_request *request; - request = list_first_entry(&dev_priv->mm.request_list, + request = list_first_entry(&render_ring->request_list, struct drm_i915_gem_request, list); - ret = i915_wait_request(dev, request->seqno); + ret = i915_wait_request(dev, + request->seqno, request->ring); + if (ret) + return ret; + + continue; + } + + if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&bsd_ring->request_list, + struct drm_i915_gem_request, + list); + + ret = i915_wait_request(dev, + request->seqno, request->ring); if (ret) return ret; @@ -2211,7 +2174,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) /* Find an object that we can immediately reuse */ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { - obj = obj_priv->obj; + obj = &obj_priv->base; if (obj->size >= min_size) break; @@ -2221,17 +2184,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) if (obj != NULL) { uint32_t seqno; - i915_gem_flush(dev, + i915_gem_flush_ring(dev, obj->write_domain, - obj->write_domain); - seqno = i915_add_request(dev, NULL, obj->write_domain); + obj->write_domain, + obj_priv->ring); + seqno = i915_add_request(dev, NULL, + obj->write_domain, + obj_priv->ring); if (seqno == 0) return -ENOMEM; - - ret = i915_wait_request(dev, seqno); - if (ret) - return ret; - continue; } } @@ -2251,12 +2212,14 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count, i; struct address_space *mapping; struct inode *inode; struct page *page; - int ret; + + BUG_ON(obj_priv->pages_refcount + == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); if (obj_priv->pages_refcount++ != 0) return 0; @@ -2276,14 +2239,13 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, mapping = inode->i_mapping; for (i = 0; i < page_count; i++) { page = read_cache_page_gfp(mapping, i, - mapping_gfp_mask (mapping) | + GFP_HIGHUSER | __GFP_COLD | + __GFP_RECLAIMABLE | gfpmask); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - i915_gem_object_put_pages(obj); - return ret; - } + if (IS_ERR(page)) + goto err_pages; + obj_priv->pages[i] = page; } @@ -2291,6 +2253,15 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, i915_gem_object_do_bit_17_swizzle(obj); return 0; + +err_pages: + while (i--) + page_cache_release(obj_priv->pages[i]); + + drm_free_large(obj_priv->pages); + obj_priv->pages = NULL; + obj_priv->pages_refcount--; + return PTR_ERR(page); } static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) @@ -2298,7 +2269,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int regnum = obj_priv->fence_reg; uint64_t val; @@ -2320,7 +2291,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int regnum = obj_priv->fence_reg; 
uint64_t val; @@ -2340,7 +2311,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int regnum = obj_priv->fence_reg; int tile_width; uint32_t fence_reg, val; @@ -2363,6 +2334,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) pitch_val = obj_priv->stride / tile_width; pitch_val = ffs(pitch_val) - 1; + if (obj_priv->tiling_mode == I915_TILING_Y && + HAS_128_BYTE_Y_TILING(dev)) + WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); + else + WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); + val = obj_priv->gtt_offset; if (obj_priv->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; @@ -2382,7 +2359,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int regnum = obj_priv->fence_reg; uint32_t val; uint32_t pitch_val; @@ -2426,7 +2403,7 @@ static int i915_find_fence_reg(struct drm_device *dev) if (!reg->obj) return i; - obj_priv = reg->obj->driver_private; + obj_priv = to_intel_bo(reg->obj); if (!obj_priv->pin_count) avail++; } @@ -2436,9 +2413,10 @@ static int i915_find_fence_reg(struct drm_device *dev) /* None available, try to steal one or wait for a user to finish */ i = I915_FENCE_REG_NONE; - list_for_each_entry(obj_priv, &dev_priv->mm.fence_list, - fence_list) { - obj = obj_priv->obj; + list_for_each_entry(reg, &dev_priv->mm.fence_list, + lru_list) { + obj = reg->obj; + obj_priv = to_intel_bo(obj); if (obj_priv->pin_count) continue; @@ -2481,13 +2459,14 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_fence_reg *reg = NULL; int ret; /* Just update our place in the LRU if our fence is getting used. 
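
i915_find_fence_reg above prefers a free fence register and otherwise steals the least-recently-used one whose owner isn't pinned; this patch moves the LRU linkage onto the register itself (reg->lru_list) instead of the object. The selection policy as a standalone model, with a timestamp standing in for list position:

#include <stdio.h>

#define NUM_FENCES 8

struct fence {
    void *obj;               /* NULL when the register is free */
    int   pinned;
    unsigned long last_used; /* stand-in for LRU list order */
};

static struct fence fences[NUM_FENCES];

/* Return a free register if one exists, else the least recently used
 * register whose owner isn't pinned, else -1. The caller must detach
 * the previous owner before reprogramming a stolen register. */
static int find_fence_reg(void)
{
    int i, victim = -1;

    for (i = 0; i < NUM_FENCES; i++)
        if (!fences[i].obj)
            return i;

    for (i = 0; i < NUM_FENCES; i++) {
        if (fences[i].pinned)
            continue;
        if (victim < 0 ||
            fences[i].last_used < fences[victim].last_used)
            victim = i;
    }
    return victim;
}

int main(void)
{
    fences[0].obj = "a"; fences[0].pinned = 1; fences[0].last_used = 1;
    fences[1].obj = "b"; fences[1].last_used = 2;
    printf("reg = %d\n", find_fence_reg());  /* 2: first free register */
    return 0;
}
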
*/ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { - list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); + reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + list_move_tail(®->lru_list, &dev_priv->mm.fence_list); return 0; } @@ -2517,7 +2496,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) obj_priv->fence_reg = ret; reg = &dev_priv->fence_regs[obj_priv->fence_reg]; - list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); + list_add_tail(®->lru_list, &dev_priv->mm.fence_list); reg->obj = obj; @@ -2548,7 +2527,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_fence_reg *reg = + &dev_priv->fence_regs[obj_priv->fence_reg]; if (IS_GEN6(dev)) { I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + @@ -2567,9 +2548,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) I915_WRITE(fence_reg, 0); } - dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; + reg->obj = NULL; obj_priv->fence_reg = I915_FENCE_REG_NONE; - list_del_init(&obj_priv->fence_list); + list_del_init(®->lru_list); } /** @@ -2584,7 +2565,7 @@ int i915_gem_object_put_fence_reg(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (obj_priv->fence_reg == I915_FENCE_REG_NONE) return 0; @@ -2622,7 +2603,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_mm_node *free_space; gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; int ret; @@ -2639,6 +2620,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) return -EINVAL; } + /* If the object is bigger than the entire aperture, reject it early + * before evicting everything in a vain attempt to find space. + */ + if (obj->size > dev->gtt_total) { + DRM_ERROR("Attempting to bind an object larger than the aperture\n"); + return -E2BIG; + } + search_free: free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, obj->size, alignment, 0); @@ -2729,7 +2718,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) void i915_gem_clflush_object(struct drm_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen @@ -2749,6 +2738,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; uint32_t old_write_domain; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) return; @@ -2756,7 +2746,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) /* Queue the GPU write cache flushing we need. 
*/ old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); - (void) i915_add_request(dev, NULL, obj->write_domain); + (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); BUG_ON(obj->write_domain); trace_i915_gem_object_change_domain(obj, @@ -2830,7 +2820,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain, old_read_domains; int ret; @@ -2880,7 +2870,7 @@ int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain, old_read_domains; int ret; @@ -2896,23 +2886,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) DRM_INFO("%s: object %p wait for seqno %08x\n", __func__, obj, obj_priv->last_rendering_seqno); #endif - ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); + ret = i915_do_wait_request(dev, + obj_priv->last_rendering_seqno, + 0, + obj_priv->ring); if (ret != 0) return ret; } + i915_gem_object_flush_cpu_write_domain(obj); + old_write_domain = obj->write_domain; old_read_domains = obj->read_domains; - obj->read_domains &= I915_GEM_DOMAIN_GTT; - - i915_gem_object_flush_cpu_write_domain(obj); - /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); - obj->read_domains |= I915_GEM_DOMAIN_GTT; + obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = I915_GEM_DOMAIN_GTT; obj_priv->dirty = 1; @@ -3093,7 +3084,7 @@ static void i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; uint32_t old_read_domains; @@ -3178,7 +3169,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (!obj_priv->page_cpu_valid) return; @@ -3218,7 +3209,7 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; int i, ret; @@ -3287,7 +3278,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int i, ret; void __iomem *reloc_page; bool need_fence; @@ -3296,9 +3287,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, obj_priv->tiling_mode != I915_TILING_NONE; /* Check fence reg constraints and rebind if necessary */ - if (need_fence && !i915_gem_object_fence_offset_ok(obj, - obj_priv->tiling_mode)) - i915_gem_object_unbind(obj); + if (need_fence && + !i915_gem_object_fence_offset_ok(obj, + obj_priv->tiling_mode)) { + ret = 
i915_gem_object_unbind(obj); + if (ret) + return ret; + } /* Choose the GTT offset for our buffer and put it there. */ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); @@ -3312,9 +3307,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, if (need_fence) { ret = i915_gem_object_get_fence_reg(obj); if (ret != 0) { - if (ret != -EBUSY && ret != -ERESTARTSYS) - DRM_ERROR("Failure to install fence: %d\n", - ret); i915_gem_object_unpin(obj); return ret; } @@ -3338,7 +3330,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, i915_gem_object_unpin(obj); return -EBADF; } - target_obj_priv = target_obj->driver_private; + target_obj_priv = to_intel_bo(target_obj); #if WATCH_RELOC DRM_INFO("%s: obj %p offset %08x target %d " @@ -3487,62 +3479,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, return 0; } -/** Dispatch a batchbuffer to the ring - */ -static int -i915_dispatch_gem_execbuffer(struct drm_device *dev, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - int nbox = exec->num_cliprects; - int i = 0, count; - uint32_t exec_start, exec_len; - RING_LOCALS; - - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - exec_len = (uint32_t) exec->batch_len; - - trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); - - count = nbox ? nbox : 1; - - for (i = 0; i < count; i++) { - if (i < nbox) { - int ret = i915_emit_box(dev, cliprects, i, - exec->DR1, exec->DR4); - if (ret) - return ret; - } - - if (IS_I830(dev) || IS_845G(dev)) { - BEGIN_LP_RING(4); - OUT_RING(MI_BATCH_BUFFER); - OUT_RING(exec_start | MI_BATCH_NON_SECURE); - OUT_RING(exec_start + exec_len - 4); - OUT_RING(0); - ADVANCE_LP_RING(); - } else { - BEGIN_LP_RING(2); - if (IS_I965G(dev)) { - OUT_RING(MI_BATCH_BUFFER_START | - (2 << 6) | - MI_BATCH_NON_SECURE_I965); - OUT_RING(exec_start); - } else { - OUT_RING(MI_BATCH_BUFFER_START | - (2 << 6)); - OUT_RING(exec_start | MI_BATCH_NON_SECURE); - } - ADVANCE_LP_RING(); - } - } - - /* XXX breadcrumb */ - return 0; -} - /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. 
* @@ -3571,7 +3507,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) if (time_after_eq(request->emitted_jiffies, recent_enough)) break; - ret = i915_wait_request(dev, request->seqno); + ret = i915_wait_request(dev, request->seqno, request->ring); if (ret != 0) break; } @@ -3690,7 +3626,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, prepare_to_wait(&dev_priv->pending_flip_queue, &wait, TASK_INTERRUPTIBLE); for (i = 0; i < count; i++) { - obj_priv = object_list[i]->driver_private; + obj_priv = to_intel_bo(object_list[i]); if (atomic_read(&obj_priv->pending_flip) > 0) break; } @@ -3711,6 +3647,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, return ret; } + int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv, @@ -3728,10 +3665,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, uint32_t seqno, flush_domains, reloc_index; int pin_tries, flips; + struct intel_ring_buffer *ring = NULL; + #if WATCH_EXEC DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", (int) args->buffers_ptr, args->buffer_count, args->batch_len); #endif + if (args->flags & I915_EXEC_BSD) { + if (!HAS_BSD(dev)) { + DRM_ERROR("execbuf with wrong flag\n"); + return -EINVAL; + } + ring = &dev_priv->bsd_ring; + } else { + ring = &dev_priv->render_ring; + } + if (args->buffer_count < 1) { DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); @@ -3799,7 +3748,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - obj_priv = object_list[i]->driver_private; + obj_priv = to_intel_bo(object_list[i]); if (obj_priv->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", object_list[i]); @@ -3844,11 +3793,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret != -ENOSPC || pin_tries >= 1) { if (ret != -ERESTARTSYS) { unsigned long long total_size = 0; - for (i = 0; i < args->buffer_count; i++) + int num_fences = 0; + for (i = 0; i < args->buffer_count; i++) { + obj_priv = to_intel_bo(object_list[i]); + total_size += object_list[i]->size; - DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", + num_fences += + exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && + obj_priv->tiling_mode != I915_TILING_NONE; + } + DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", pinned+1, args->buffer_count, - total_size, ret); + total_size, num_fences, + ret); DRM_ERROR("%d objects [%d pinned], " "%d object bytes [%d pinned], " "%d/%d gtt bytes\n", @@ -3918,14 +3875,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains); - if (dev->flush_domains & I915_GEM_GPU_DOMAINS) + if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { (void)i915_add_request(dev, file_priv, - dev->flush_domains); + dev->flush_domains, + &dev_priv->render_ring); + + if (HAS_BSD(dev)) + (void)i915_add_request(dev, file_priv, + dev->flush_domains, + &dev_priv->bsd_ring); + } } for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain = obj->write_domain; obj->write_domain = obj->pending_write_domain; @@ -3957,7 +3921,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, #endif /* Exec the batchbuffer */ - ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); + ret = ring->dispatch_gem_execbuffer(dev, ring, 
args, + cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -3967,7 +3932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * Ensure that the commands in the batch buffer are * finished before the interrupt fires */ - flush_domains = i915_retire_commands(dev); + flush_domains = i915_retire_commands(dev, ring); i915_verify_inactive(dev, __FILE__, __LINE__); @@ -3978,12 +3943,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * *some* interrupts representing completion of buffers that we can * wait on when trying to clear up gtt space). */ - seqno = i915_add_request(dev, file_priv, flush_domains); + seqno = i915_add_request(dev, file_priv, flush_domains, ring); BUG_ON(seqno == 0); for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; + obj_priv = to_intel_bo(obj); - i915_gem_object_move_to_active(obj, seqno); + i915_gem_object_move_to_active(obj, seqno, ring); #if WATCH_LRU DRM_INFO("%s: move to exec list %p\n", __func__, obj); #endif @@ -4000,7 +3966,7 @@ err: for (i = 0; i < args->buffer_count; i++) { if (object_list[i]) { - obj_priv = object_list[i]->driver_private; + obj_priv = to_intel_bo(object_list[i]); obj_priv->in_execbuffer = false; } drm_gem_object_unreference(object_list[i]); @@ -4095,7 +4061,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2.DR4 = args->DR4; exec2.num_cliprects = args->num_cliprects; exec2.cliprects_ptr = args->cliprects_ptr; - exec2.flags = 0; + exec2.flags = I915_EXEC_RENDER; ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); if (!ret) { @@ -4178,10 +4144,23 @@ int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; + BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + i915_verify_inactive(dev, __FILE__, __LINE__); + + if (obj_priv->gtt_space != NULL) { + if (alignment == 0) + alignment = i915_gem_get_gtt_alignment(obj); + if (obj_priv->gtt_offset & (alignment - 1)) { + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + } + } + if (obj_priv->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment); if (ret) @@ -4211,7 +4190,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); i915_verify_inactive(dev, __FILE__, __LINE__); obj_priv->pin_count--; @@ -4251,7 +4230,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); return -EBADF; } - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); @@ -4308,7 +4287,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, return -EBADF; } - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); if (obj_priv->pin_filp != file_priv) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); @@ -4334,6 +4313,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_busy *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + drm_i915_private_t *dev_priv = dev->dev_private; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == 
NULL) { @@ -4348,9 +4328,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * actually unmasked, and our working set ends up being larger than * required. */ - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev, &dev_priv->render_ring); + + if (HAS_BSD(dev)) + i915_gem_retire_requests(dev, &dev_priv->bsd_ring); - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); /* Don't count being on the flushing list against the object being * done. Otherwise, a buffer left on the flushing list but not getting * flushed (because nobody's flushing that domain) won't ever return @@ -4396,7 +4379,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, } mutex_lock(&dev->struct_mutex); - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); if (obj_priv->pin_count) { drm_gem_object_unreference(obj); @@ -4422,34 +4405,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return 0; } -int i915_gem_init_object(struct drm_gem_object *obj) +struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, + size_t size) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL); - if (obj_priv == NULL) - return -ENOMEM; + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (obj == NULL) + return NULL; - /* - * We've just allocated pages from the kernel, - * so they've just been written by the CPU with - * zeros. They'll need to be clflushed before we - * use them with the GPU. - */ - obj->write_domain = I915_GEM_DOMAIN_CPU; - obj->read_domains = I915_GEM_DOMAIN_CPU; + if (drm_gem_object_init(dev, &obj->base, size) != 0) { + kfree(obj); + return NULL; + } - obj_priv->agp_type = AGP_USER_MEMORY; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = I915_GEM_DOMAIN_CPU; - obj->driver_private = obj_priv; - obj_priv->obj = obj; - obj_priv->fence_reg = I915_FENCE_REG_NONE; - INIT_LIST_HEAD(&obj_priv->list); - INIT_LIST_HEAD(&obj_priv->gpu_write_list); - INIT_LIST_HEAD(&obj_priv->fence_list); - obj_priv->madv = I915_MADV_WILLNEED; + obj->agp_type = AGP_USER_MEMORY; + obj->base.driver_private = NULL; + obj->fence_reg = I915_FENCE_REG_NONE; + INIT_LIST_HEAD(&obj->list); + INIT_LIST_HEAD(&obj->gpu_write_list); + obj->madv = I915_MADV_WILLNEED; - trace_i915_gem_object_create(obj); + trace_i915_gem_object_create(&obj->base); + + return &obj->base; +} + +int i915_gem_init_object(struct drm_gem_object *obj) +{ + BUG(); return 0; } @@ -4457,7 +4444,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) void i915_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); trace_i915_gem_object_destroy(obj); @@ -4472,9 +4459,11 @@ void i915_gem_free_object(struct drm_gem_object *obj) if (obj_priv->mmap_offset) i915_gem_free_mmap_offset(obj); + drm_gem_object_release(obj); + kfree(obj_priv->page_cpu_valid); kfree(obj_priv->bit_17); - kfree(obj->driver_private); + kfree(obj_priv); } /** Unbinds all inactive objects. 
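/*
 * [Editor's illustrative sketch -- not part of the patch.] The
 * to_intel_bo() conversions used throughout this series rely on the
 * allocation change above: the GEM object is now embedded in the driver
 * object as "base" instead of hanging off obj->driver_private, so the
 * wrapper is recovered by pointer arithmetic rather than a second
 * allocation. The types below are simplified stand-ins for the real
 * i915 structures.
 */
#include <stddef.h>

struct gem_object_sketch {
	size_t size;
};

struct intel_bo_sketch {
	struct gem_object_sketch base;	/* embedded, not pointed-to */
	int fence_reg;
};

static inline struct intel_bo_sketch *
to_intel_bo_sketch(struct gem_object_sketch *obj)
{
	/* container_of(): step back from the member to its container */
	return (struct intel_bo_sketch *)
		((char *)obj - offsetof(struct intel_bo_sketch, base));
}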
*/ @@ -4487,9 +4476,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev) struct drm_gem_object *obj; int ret; - obj = list_first_entry(&dev_priv->mm.inactive_list, - struct drm_i915_gem_object, - list)->obj; + obj = &list_first_entry(&dev_priv->mm.inactive_list, + struct drm_i915_gem_object, + list)->base; ret = i915_gem_object_unbind(obj); if (ret != 0) { @@ -4509,7 +4498,10 @@ i915_gem_idle(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { + if (dev_priv->mm.suspended || + (dev_priv->render_ring.gem_object == NULL) || + (HAS_BSD(dev) && + dev_priv->bsd_ring.gem_object == NULL)) { mutex_unlock(&dev->struct_mutex); return 0; } @@ -4547,190 +4539,107 @@ i915_gem_idle(struct drm_device *dev) return 0; } +/* + * 965+ support PIPE_CONTROL commands, which provide finer grained control + * over cache flushing. + */ static int -i915_gem_init_hws(struct drm_device *dev) +i915_gem_init_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; int ret; - /* If we need a physical address for the status page, it's already - * initialized at driver load time. - */ - if (!I915_NEED_GFX_HWS(dev)) - return 0; - - obj = drm_gem_object_alloc(dev, 4096); + obj = i915_gem_alloc_object(dev, 4096); if (obj == NULL) { - DRM_ERROR("Failed to allocate status page\n"); - return -ENOMEM; + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; + goto err; } - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); obj_priv->agp_type = AGP_USER_CACHED_MEMORY; ret = i915_gem_object_pin(obj, 4096); - if (ret != 0) { - drm_gem_object_unreference(obj); - return ret; - } + if (ret) + goto err_unref; - dev_priv->status_gfx_addr = obj_priv->gtt_offset; + dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; + dev_priv->seqno_page = kmap(obj_priv->pages[0]); + if (dev_priv->seqno_page == NULL) + goto err_unpin; - dev_priv->hw_status_page = kmap(obj_priv->pages[0]); - if (dev_priv->hw_status_page == NULL) { - DRM_ERROR("Failed to map status page.\n"); - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - return -EINVAL; - } - dev_priv->hws_obj = obj; - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - if (IS_GEN6(dev)) { - I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); - I915_READ(HWS_PGA_GEN6); /* posting read */ - } else { - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); - I915_READ(HWS_PGA); /* posting read */ - } - DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); + dev_priv->seqno_obj = obj; + memset(dev_priv->seqno_page, 0, PAGE_SIZE); return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(obj); +err: + return ret; } + static void -i915_gem_cleanup_hws(struct drm_device *dev) +i915_gem_cleanup_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - if (dev_priv->hws_obj == NULL) - return; - - obj = dev_priv->hws_obj; - obj_priv = obj->driver_private; - + obj = dev_priv->seqno_obj; + obj_priv = to_intel_bo(obj); kunmap(obj_priv->pages[0]); i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); - dev_priv->hws_obj = NULL; + dev_priv->seqno_obj = NULL; - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); - dev_priv->hw_status_page = NULL; - - /* Write high address into HWS_PGA when 
disabling. */ - I915_WRITE(HWS_PGA, 0x1ffff000); + dev_priv->seqno_page = NULL; } int i915_gem_init_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - drm_i915_ring_buffer_t *ring = &dev_priv->ring; int ret; - u32 head; - ret = i915_gem_init_hws(dev); - if (ret != 0) - return ret; + dev_priv->render_ring = render_ring; - obj = drm_gem_object_alloc(dev, 128 * 1024); - if (obj == NULL) { - DRM_ERROR("Failed to allocate ringbuffer\n"); - i915_gem_cleanup_hws(dev); - return -ENOMEM; + if (!I915_NEED_GFX_HWS(dev)) { + dev_priv->render_ring.status_page.page_addr + = dev_priv->status_page_dmah->vaddr; + memset(dev_priv->render_ring.status_page.page_addr, + 0, PAGE_SIZE); } - obj_priv = obj->driver_private; - ret = i915_gem_object_pin(obj, 4096); - if (ret != 0) { - drm_gem_object_unreference(obj); - i915_gem_cleanup_hws(dev); - return ret; + if (HAS_PIPE_CONTROL(dev)) { + ret = i915_gem_init_pipe_control(dev); + if (ret) + return ret; } - /* Set up the kernel mapping for the ring. */ - ring->Size = obj->size; - - ring->map.offset = dev->agp->base + obj_priv->gtt_offset; - ring->map.size = obj->size; - ring->map.type = 0; - ring->map.flags = 0; - ring->map.mtrr = 0; - - drm_core_ioremap_wc(&ring->map, dev); - if (ring->map.handle == NULL) { - DRM_ERROR("Failed to map ringbuffer.\n"); - memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - i915_gem_cleanup_hws(dev); - return -EINVAL; - } - ring->ring_obj = obj; - ring->virtual_start = ring->map.handle; - - /* Stop the ring if it's running. */ - I915_WRITE(PRB0_CTL, 0); - I915_WRITE(PRB0_TAIL, 0); - I915_WRITE(PRB0_HEAD, 0); - - /* Initialize the ring. 
*/ - I915_WRITE(PRB0_START, obj_priv->gtt_offset); - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - - /* G45 ring initialization fails to reset head to zero */ - if (head != 0) { - DRM_ERROR("Ring head not reset to zero " - "ctl %08x head %08x tail %08x start %08x\n", - I915_READ(PRB0_CTL), - I915_READ(PRB0_HEAD), - I915_READ(PRB0_TAIL), - I915_READ(PRB0_START)); - I915_WRITE(PRB0_HEAD, 0); - - DRM_ERROR("Ring head forced to zero " - "ctl %08x head %08x tail %08x start %08x\n", - I915_READ(PRB0_CTL), - I915_READ(PRB0_HEAD), - I915_READ(PRB0_TAIL), - I915_READ(PRB0_START)); - } - - I915_WRITE(PRB0_CTL, - ((obj->size - 4096) & RING_NR_PAGES) | - RING_NO_REPORT | - RING_VALID); - - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - - /* If the head is still not zero, the ring is dead */ - if (head != 0) { - DRM_ERROR("Ring initialization failed " - "ctl %08x head %08x tail %08x start %08x\n", - I915_READ(PRB0_CTL), - I915_READ(PRB0_HEAD), - I915_READ(PRB0_TAIL), - I915_READ(PRB0_START)); - return -EIO; - } + ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); + if (ret) + goto cleanup_pipe_control; - /* Update our cache of the ring state */ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_kernel_lost_context(dev); - else { - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->Size; + if (HAS_BSD(dev)) { + dev_priv->bsd_ring = bsd_ring; + ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); + if (ret) + goto cleanup_render_ring; } return 0; + +cleanup_render_ring: + intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); +cleanup_pipe_control: + if (HAS_PIPE_CONTROL(dev)) + i915_gem_cleanup_pipe_control(dev); + return ret; } void @@ -4738,17 +4647,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - if (dev_priv->ring.ring_obj == NULL) - return; - - drm_core_ioremapfree(&dev_priv->ring.map, dev); - - i915_gem_object_unpin(dev_priv->ring.ring_obj); - drm_gem_object_unreference(dev_priv->ring.ring_obj); - dev_priv->ring.ring_obj = NULL; - memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); - - i915_gem_cleanup_hws(dev); + intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); + if (HAS_BSD(dev)) + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + if (HAS_PIPE_CONTROL(dev)) + i915_gem_cleanup_pipe_control(dev); } int @@ -4776,12 +4679,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } spin_lock(&dev_priv->mm.active_list_lock); - BUG_ON(!list_empty(&dev_priv->mm.active_list)); + BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); + BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); spin_unlock(&dev_priv->mm.active_list_lock); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); - BUG_ON(!list_empty(&dev_priv->mm.request_list)); + BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); + BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); mutex_unlock(&dev->struct_mutex); drm_irq_install(dev); @@ -4820,20 +4725,34 @@ i915_gem_load(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; spin_lock_init(&dev_priv->mm.active_list_lock); - INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); - INIT_LIST_HEAD(&dev_priv->mm.request_list); 
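/*
 * [Editor's illustrative sketch -- not part of the patch.] The new
 * i915_gem_init_ringbuffer() above uses the kernel's goto-unwind idiom:
 * each completed setup step gains a cleanup label, and a later failure
 * jumps to the label that tears down exactly what already succeeded
 * (cleanup_render_ring, cleanup_pipe_control). setup_a()/setup_b()/
 * teardown_a() are hypothetical stand-ins.
 */
static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; }
static void teardown_a(void) { }

static int init_two_stage_sketch(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = setup_b();
	if (ret)
		goto cleanup_a;	/* undo only the step that succeeded */

	return 0;

cleanup_a:
	teardown_a();
	return ret;
}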
INIT_LIST_HEAD(&dev_priv->mm.fence_list); + INIT_LIST_HEAD(&dev_priv->render_ring.active_list); + INIT_LIST_HEAD(&dev_priv->render_ring.request_list); + if (HAS_BSD(dev)) { + INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); + INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); + } + for (i = 0; i < 16; i++) + INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); - dev_priv->mm.next_gem_seqno = 1; - spin_lock(&shrink_list_lock); list_add(&dev_priv->mm.shrink_list, &shrink_list); spin_unlock(&shrink_list_lock); + /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ + if (IS_GEN3(dev)) { + u32 tmp = I915_READ(MI_ARB_STATE); + if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { + /* arb state is a masked write, so set bit + bit in mask */ + tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); + I915_WRITE(MI_ARB_STATE, tmp); + } + } + /* Old X drivers will take 0-2 for front, back, depth buffers */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev_priv->fence_reg_start = 3; @@ -4932,7 +4851,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, int ret; int page_count; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); if (!obj_priv->phys_obj) return; @@ -4971,7 +4890,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, if (id > I915_MAX_PHYS_OBJECT) return -EINVAL; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); if (obj_priv->phys_obj) { if (obj_priv->phys_obj->id == id) @@ -5022,7 +4941,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); void *obj_addr; int ret; char __user *user_data; @@ -5054,7 +4973,23 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) } static int -i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) +i915_gpu_is_active(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int lists_empty; + + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->render_ring.active_list); + if (HAS_BSD(dev)) + lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); + spin_unlock(&dev_priv->mm.active_list_lock); + + return !lists_empty; +} + +static int +i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) { drm_i915_private_t *dev_priv, *next_dev; struct drm_i915_gem_object *obj_priv, *next_obj; @@ -5082,6 +5017,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) spin_lock(&shrink_list_lock); +rescan: /* first scan for clean buffers */ list_for_each_entry_safe(dev_priv, next_dev, &shrink_list, mm.shrink_list) { @@ -5091,14 +5027,16 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) continue; spin_unlock(&shrink_list_lock); + i915_gem_retire_requests(dev, &dev_priv->render_ring); - i915_gem_retire_requests(dev); + if (HAS_BSD(dev)) + i915_gem_retire_requests(dev, &dev_priv->bsd_ring); list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, list) { if (i915_gem_object_is_purgeable(obj_priv)) { - i915_gem_object_unbind(obj_priv->obj); + i915_gem_object_unbind(&obj_priv->base); if (--nr_to_scan <= 0) break; } @@ -5127,7 +5065,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) &dev_priv->mm.inactive_list, list) { if (nr_to_scan > 0) { - i915_gem_object_unbind(obj_priv->obj); 
+ i915_gem_object_unbind(&obj_priv->base); nr_to_scan--; } else cnt++; @@ -5139,6 +5077,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) would_deadlock = 0; } + if (nr_to_scan) { + int active = 0; + + /* + * We are desperate for pages, so as a last resort, wait + * for the GPU to finish and discard whatever we can. + * This has a dramatic impact to reduce the number of + * OOM-killer events whilst running the GPU aggressively. + */ + list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { + struct drm_device *dev = dev_priv->dev; + + if (!mutex_trylock(&dev->struct_mutex)) + continue; + + spin_unlock(&shrink_list_lock); + + if (i915_gpu_is_active(dev)) { + i915_gpu_idle(dev); + active++; + } + + spin_lock(&shrink_list_lock); + mutex_unlock(&dev->struct_mutex); + } + + if (active) + goto rescan; + } + spin_unlock(&shrink_list_lock); if (would_deadlock) diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index e602614bd3f8..80f380b1d951 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line) struct drm_i915_gem_object *obj_priv; list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - obj = obj_priv->obj; + obj = &obj_priv->base; if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))) @@ -72,7 +72,7 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len, const char *where, uint32_t mark) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page; DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); @@ -137,7 +137,7 @@ void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page; uint32_t *gtt_mapping; uint32_t *backing_map = NULL; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b5c55d88ff76..4b7c49d4257d 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) * reg, so dont bother to check the size */ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; - } else if (IS_I9XX(dev)) { - uint32_t pitch_val = ffs(stride / tile_width) - 1; - - /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) - * instead of 4 (2KB) on 945s. 
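/*
 * [Editor's illustrative sketch -- not part of the patch.] Control flow
 * of the shrinker's last-resort path added above: if the scan quota is
 * still unmet after unbinding inactive buffers, idle the GPU so pending
 * work retires onto the inactive list, then rescan once. The helpers
 * are hypothetical stand-ins and all locking is omitted.
 */
static int scan_inactive(int nr) { return nr; }	/* unbind pass, returns leftover quota */
static int gpu_is_active(void)   { return 0; }
static void gpu_idle(void)       { }

static int shrink_sketch(int nr_to_scan)
{
rescan:
	nr_to_scan = scan_inactive(nr_to_scan);
	if (nr_to_scan && gpu_is_active()) {
		gpu_idle();	/* wait for the GPU, retiring its buffers */
		goto rescan;	/* one more pass over the newly idle objects */
	}
	return nr_to_scan;	/* 0 when the quota was met */
}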
- */ - if (pitch_val > I915_FENCE_MAX_PITCH_VAL || - size > (I830_FENCE_MAX_SIZE_VAL << 20)) + } else if (IS_GEN3(dev) || IS_GEN2(dev)) { + if (stride > 8192) return false; - } else { - uint32_t pitch_val = ffs(stride / tile_width) - 1; - if (pitch_val > I830_FENCE_MAX_PITCH_VAL || - size > (I830_FENCE_MAX_SIZE_VAL << 19)) - return false; + if (IS_GEN3(dev)) { + if (size > I830_FENCE_MAX_SIZE_VAL << 20) + return false; + } else { + if (size > I830_FENCE_MAX_SIZE_VAL << 19) + return false; + } } /* 965+ just needs multiples of tile width */ @@ -240,7 +236,7 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (obj_priv->gtt_space == NULL) return true; @@ -280,13 +276,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -EINVAL; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { drm_gem_object_unreference_unlocked(obj); return -EINVAL; } + if (obj_priv->pin_count) { + drm_gem_object_unreference_unlocked(obj); + return -EBUSY; + } + if (args->tiling_mode == I915_TILING_NONE) { args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; args->stride = 0; @@ -325,9 +326,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * need to ensure that any fence register is cleared. */ if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) - ret = i915_gem_object_unbind(obj); + ret = i915_gem_object_unbind(obj); + else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + ret = i915_gem_object_put_fence_reg(obj); else - ret = i915_gem_object_put_fence_reg(obj); + i915_gem_release_mmap(obj); + if (ret != 0) { WARN(ret != -ERESTARTSYS, "failed to reset object for tiling switch"); @@ -361,7 +365,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -EINVAL; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); mutex_lock(&dev->struct_mutex); @@ -424,7 +428,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count = obj->size >> PAGE_SHIFT; int i; @@ -453,7 +457,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count = obj->size >> PAGE_SHIFT; int i; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5388354da0d1..dba53d4b9fb3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -27,6 +27,7 @@ */ #include <linux/sysrq.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -52,7 +53,7 @@ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) /** Interrupts that we mask and unmask at runtime. 
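/*
 * [Editor's illustrative sketch -- not part of the patch.] The pre-965
 * branch rewritten above, folded into one predicate: gen2 and gen3 share
 * the 8KB pitch ceiling, but gen3 fences may cover objects twice as
 * large (size limit shifted by 20 rather than 19). FENCE_MAX_SIZE_VAL
 * mirrors I830_FENCE_MAX_SIZE_VAL.
 */
#include <stdbool.h>
#include <stdint.h>

#define FENCE_MAX_SIZE_VAL (1u << 8)

static bool pre965_tiling_ok_sketch(int gen, uint32_t stride, uint32_t size)
{
	if (stride > 8192)	/* common pitch ceiling */
		return false;
	if (gen == 3)
		return size <= (FENCE_MAX_SIZE_VAL << 20);
	return size <= (FENCE_MAX_SIZE_VAL << 19);	/* gen2 */
}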
*/ -#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) +#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT) #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ PIPE_VBLANK_INTERRUPT_STATUS) @@ -73,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) } } -static inline void +void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->gt_irq_mask_reg & mask) != mask) { @@ -114,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) } } -static inline void +void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->irq_mask_reg & mask) != mask) { @@ -168,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); - else + else { i915_enable_pipestat(dev_priv, 1, I915_LEGACY_BLC_EVENT_ENABLE); + if (IS_I965G(dev)) + i915_enable_pipestat(dev_priv, 0, + I915_LEGACY_BLC_EVENT_ENABLE); + } } /** @@ -255,28 +260,27 @@ static void i915_hotplug_work_func(struct work_struct *work) hotplug_work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *connector; + struct drm_encoder *encoder; - if (mode_config->num_connector) { - list_for_each_entry(connector, &mode_config->connector_list, head) { - struct intel_output *intel_output = to_intel_output(connector); + if (mode_config->num_encoder) { + list_for_each_entry(encoder, &mode_config->encoder_list, head) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - if (intel_output->hot_plug) - (*intel_output->hot_plug) (intel_output); + if (intel_encoder->hot_plug) + (*intel_encoder->hot_plug) (intel_encoder); } } /* Just fire off a uevent and let userspace tell us what to do */ - drm_sysfs_hotplug_event(dev); + drm_helper_hpd_irq_event(dev); } static void i915_handle_rps_change(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; - u16 rgvswctl; u8 new_delay = dev_priv->cur_delay; - I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); + I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); busy_up = I915_READ(RCPREVBSYTUPAVG); busy_down = I915_READ(RCPREVBSYTDNAVG); max_avg = I915_READ(RCBMAXAVG); @@ -295,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev) new_delay = dev_priv->min_delay; } - DRM_DEBUG("rps change requested: %d -> %d\n", - dev_priv->cur_delay, new_delay); - - rgvswctl = I915_READ(MEMSWCTL); - if (rgvswctl & MEMCTL_CMD_STS) { - DRM_ERROR("gpu busy, RCS change rejected\n"); - return; /* still busy with another command */ - } - - /* Program the new state */ - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - I915_WRITE(MEMSWCTL, rgvswctl); - POSTING_READ(MEMSWCTL); - - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE(MEMSWCTL, rgvswctl); - - dev_priv->cur_delay = new_delay; - - DRM_DEBUG("rps changed\n"); + if (ironlake_set_drps(dev, new_delay)) + dev_priv->cur_delay = new_delay; return; } @@ -326,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pch_iir; struct drm_i915_master_private *master_priv; + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); @@ -348,14 +334,17 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); 
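/*
 * [Editor's illustrative sketch -- not part of the patch.] The
 * i915_enable_irq()/i915_disable_irq() helpers un-inlined above keep a
 * cached copy of the interrupt mask and only write the hardware IMR
 * when a bit actually changes; a 0 bit in IMR means "unmasked".
 * write_imr() is a hypothetical MMIO write.
 */
#include <stdint.h>

static uint32_t irq_mask_reg = ~0u;	/* everything masked at start */

static void write_imr(uint32_t v) { (void)v; }

static void enable_irq_sketch(uint32_t mask)
{
	if ((irq_mask_reg & mask) != 0) {
		irq_mask_reg &= ~mask;
		write_imr(irq_mask_reg);
	}
}

static void disable_irq_sketch(uint32_t mask)
{
	if ((irq_mask_reg & mask) != mask) {
		irq_mask_reg |= mask;
		write_imr(irq_mask_reg);
	}
}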
} - if (gt_iir & GT_USER_INTERRUPT) { - u32 seqno = i915_get_gem_seqno(dev); - dev_priv->mm.irq_gem_seqno = seqno; + if (gt_iir & GT_PIPE_NOTIFY) { + u32 seqno = render_ring->get_gem_seqno(dev, render_ring); + render_ring->irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); - DRM_WAKEUP(&dev_priv->irq_queue); + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); dev_priv->hangcheck_count = 0; mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); } + if (gt_iir & GT_BSD_USER_INTERRUPT) + DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + if (de_iir & DE_GSE) ironlake_opregion_gse_intr(dev); @@ -383,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) } if (de_iir & DE_PCU_EVENT) { - I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); + I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); i915_handle_rps_change(dev); } @@ -443,7 +432,7 @@ i915_error_object_create(struct drm_device *dev, if (src == NULL) return NULL; - src_priv = src->driver_private; + src_priv = to_intel_bo(src); if (src_priv->pages == NULL) return NULL; @@ -455,11 +444,15 @@ i915_error_object_create(struct drm_device *dev, for (page = 0; page < page_count; page++) { void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); + unsigned long flags; + if (d == NULL) goto unwind; - s = kmap_atomic(src_priv->pages[page], KM_USER0); + local_irq_save(flags); + s = kmap_atomic(src_priv->pages[page], KM_IRQ0); memcpy(d, s, PAGE_SIZE); - kunmap_atomic(s, KM_USER0); + kunmap_atomic(s, KM_IRQ0); + local_irq_restore(flags); dst->pages[page] = d; } dst->page_count = page_count; @@ -527,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev) */ bbaddr = 0; head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring = (u32 *)(dev_priv->ring.virtual_start + head); + ring = (u32 *)(dev_priv->render_ring.virtual_start + head); - while (--ring >= (u32 *)dev_priv->ring.virtual_start) { + while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { bbaddr = i915_get_bbaddr(dev, ring); if (bbaddr) break; } if (bbaddr == 0) { - ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); - while (--ring >= (u32 *)dev_priv->ring.virtual_start) { + ring = (u32 *)(dev_priv->render_ring.virtual_start + + dev_priv->render_ring.size); + while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { bbaddr = i915_get_bbaddr(dev, ring); if (bbaddr) break; @@ -578,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev) return; } - error->seqno = i915_get_gem_seqno(dev); + error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); @@ -606,8 +600,10 @@ static void i915_capture_error_state(struct drm_device *dev) batchbuffer[0] = NULL; batchbuffer[1] = NULL; count = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { - struct drm_gem_object *obj = obj_priv->obj; + list_for_each_entry(obj_priv, + &dev_priv->render_ring.active_list, list) { + + struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && bbaddr >= obj_priv->gtt_offset && @@ -630,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev) error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); /* Record the ringbuffer */ - error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); + error->ringbuffer = i915_error_object_create(dev, + dev_priv->render_ring.gem_object); /* Record buffers on the active list. 
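/*
 * [Editor's illustrative sketch -- not part of the patch.] Shape of the
 * per-page copy in i915_error_object_create() above: allocate and copy
 * one page at a time, and on failure unwind every page copied so far.
 * In the driver the copy uses a GFP_ATOMIC kmalloc plus kmap_atomic()
 * with local interrupts disabled (the KM_IRQ0 change above), since
 * error capture can run from interrupt context; plain libc calls stand
 * in for that here.
 */
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

static void **copy_pages_sketch(void *const *src, int page_count)
{
	void **dst = calloc(page_count, sizeof(*dst));
	int page;

	if (dst == NULL)
		return NULL;

	for (page = 0; page < page_count; page++) {
		dst[page] = malloc(PAGE_SZ);
		if (dst[page] == NULL)
			goto unwind;	/* free everything copied so far */
		memcpy(dst[page], src[page], PAGE_SZ);
	}
	return dst;

unwind:
	while (--page >= 0)
		free(dst[page]);
	free(dst);
	return NULL;
}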
*/ error->active_bo = NULL; @@ -642,8 +639,9 @@ if (error->active_bo) { int i = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { - struct drm_gem_object *obj = obj_priv->obj; + list_for_each_entry(obj_priv, + &dev_priv->render_ring.active_list, list) { + struct drm_gem_object *obj = &obj_priv->base; error->active_bo[i].size = obj->size; error->active_bo[i].name = obj->name; @@ -694,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev) i915_error_state_free(dev, error); } -/** - * i915_handle_error - handle an error interrupt - * @dev: drm device - * - * Do some basic checking of regsiter state at error interrupt time and - * dump it to the syslog. Also call i915_capture_error_state() to make - * sure we get a record and make it available in debugfs. Fire a uevent - * so userspace knows something bad happened (should trigger collection - * of a ring dump etc.). - */ -static void i915_handle_error(struct drm_device *dev, bool wedged) +static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 eir = I915_READ(EIR); - u32 pipea_stats = I915_READ(PIPEASTAT); - u32 pipeb_stats = I915_READ(PIPEBSTAT); - i915_capture_error_state(dev); + if (!eir) + return; printk(KERN_ERR "render error detected, EIR: 0x%08x\n", eir); @@ -757,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) } if (eir & I915_ERROR_MEMORY_REFRESH) { + u32 pipea_stats = I915_READ(PIPEASTAT); + u32 pipeb_stats = I915_READ(PIPEBSTAT); + printk(KERN_ERR "memory refresh error\n"); printk(KERN_ERR "PIPEASTAT: 0x%08x\n", pipea_stats); @@ -813,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) I915_WRITE(EMR, I915_READ(EMR) | eir); I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); } +} + +/** + * i915_handle_error - handle an error interrupt + * @dev: drm device + * + * Do some basic checking of register state at error interrupt time and + * dump it to the syslog. Also call i915_capture_error_state() to make + * sure we get a record and make it available in debugfs. Fire a uevent + * so userspace knows something bad happened (should trigger collection + * of a ring dump etc.).
+ */ +static void i915_handle_error(struct drm_device *dev, bool wedged) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + i915_capture_error_state(dev); + i915_report_and_clear_eir(dev); if (wedged) { atomic_set(&dev_priv->mm.wedged, 1); @@ -820,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) /* * Wakeup waiting processes so they don't hang */ - DRM_WAKEUP(&dev_priv->irq_queue); + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -839,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) unsigned long irqflags; int irq_received; int ret = IRQ_NONE; + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; atomic_inc(&dev_priv->irq_received); @@ -919,33 +928,46 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) } if (iir & I915_USER_INTERRUPT) { - u32 seqno = i915_get_gem_seqno(dev); - dev_priv->mm.irq_gem_seqno = seqno; + u32 seqno = + render_ring->get_gem_seqno(dev, render_ring); + render_ring->irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); - DRM_WAKEUP(&dev_priv->irq_queue); + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); dev_priv->hangcheck_count = 0; mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); } - if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) + if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) + DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + + if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); + if (dev_priv->flip_pending_is_done) + intel_finish_page_flip_plane(dev, 0); + } - if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) + if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 1); + if (dev_priv->flip_pending_is_done) + intel_finish_page_flip_plane(dev, 1); + } if (pipea_stats & vblank_status) { vblank++; drm_handle_vblank(dev, 0); - intel_finish_page_flip(dev, 0); + if (!dev_priv->flip_pending_is_done) + intel_finish_page_flip(dev, 0); } if (pipeb_stats & vblank_status) { vblank++; drm_handle_vblank(dev, 1); - intel_finish_page_flip(dev, 1); + if (!dev_priv->flip_pending_is_done) + intel_finish_page_flip(dev, 1); } - if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || + if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || + (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || (iir & I915_ASLE_INTERRUPT)) opregion_asle_intr(dev); @@ -974,7 +996,6 @@ static int i915_emit_irq(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - RING_LOCALS; i915_kernel_lost_context(dev); @@ -996,43 +1017,13 @@ static int i915_emit_irq(struct drm_device * dev) return dev_priv->counter; } -void i915_user_irq_get(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { - if (HAS_PCH_SPLIT(dev)) - ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); - else - i915_enable_irq(dev_priv, I915_USER_INTERRUPT); - } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); -} - -void i915_user_irq_put(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); - if 
(dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { - if (HAS_PCH_SPLIT(dev)) - ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); - else - i915_disable_irq(dev_priv, I915_USER_INTERRUPT); - } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); -} - void i915_trace_irq_get(struct drm_device *dev, u32 seqno) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; if (dev_priv->trace_irq_seqno == 0) - i915_user_irq_get(dev); + render_ring->user_irq_get(dev, render_ring); dev_priv->trace_irq_seqno = seqno; } @@ -1042,6 +1033,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret = 0; + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv)); @@ -1055,10 +1047,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - i915_user_irq_get(dev); - DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, + render_ring->user_irq_get(dev, render_ring); + DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); - i915_user_irq_put(dev); + render_ring->user_irq_put(dev, render_ring); if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", @@ -1077,7 +1069,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, drm_i915_irq_emit_t *emit = data; int result; - if (!dev_priv || !dev_priv->ring.virtual_start) { + if (!dev_priv || !dev_priv->render_ring.virtual_start) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } @@ -1223,9 +1215,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } -struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { +struct drm_i915_gem_request * +i915_get_tail_request(struct drm_device *dev) +{ drm_i915_private_t *dev_priv = dev->dev_private; - return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); + return list_entry(dev_priv->render_ring.request_list.prev, + struct drm_i915_gem_request, list); } /** @@ -1250,8 +1245,10 @@ void i915_hangcheck_elapsed(unsigned long data) acthd = I915_READ(ACTHD_I965); /* If all work is done then ACTHD clearly hasn't advanced. 
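/*
 * [Editor's illustrative sketch -- not part of the patch.] The hangcheck
 * heuristic this timer implements: sample the ring's active head (ACTHD);
 * if it has not moved since the last sample while requests are still
 * pending, count a strike, and declare a hang only after repeated
 * strikes. The strike threshold here is an assumption for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t last_acthd;
static int hangcheck_count;

static bool hangcheck_sketch(uint32_t acthd, bool work_pending)
{
	if (!work_pending || acthd != last_acthd) {
		hangcheck_count = 0;	/* idle, or still making progress */
		last_acthd = acthd;
		return false;
	}
	return ++hangcheck_count > 1;	/* stuck across consecutive samples */
}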
*/ - if (list_empty(&dev_priv->mm.request_list) || - i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { + if (list_empty(&dev_priv->render_ring.request_list) || + i915_seqno_passed(i915_get_gem_seqno(dev, + &dev_priv->render_ring), + i915_get_tail_request(dev)->seqno)) { dev_priv->hangcheck_count = 0; return; } @@ -1304,7 +1301,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) /* enable kind of interrupts always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; - u32 render_mask = GT_USER_INTERRUPT; + u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; @@ -1318,7 +1315,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) (void) I915_READ(DEIER); /* user interrupt should be enabled, but masked initial */ - dev_priv->gt_irq_mask_reg = 0xffffffff; + dev_priv->gt_irq_mask_reg = ~render_mask; dev_priv->gt_irq_enable_reg = render_mask; I915_WRITE(GTIIR, I915_READ(GTIIR)); @@ -1381,7 +1378,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; u32 error_mask; - DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); + + if (HAS_BSD(dev)) + DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; @@ -1395,29 +1395,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) dev_priv->pipestat[1] = 0; if (I915_HAS_HOTPLUG(dev)) { - u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); - - /* Note HDMI and DP share bits */ - if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) - hotplug_en |= HDMIB_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) - hotplug_en |= HDMIC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) - hotplug_en |= HDMID_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOB_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) - hotplug_en |= CRT_HOTPLUG_INT_EN; - /* Ignore TV since it's buggy */ - - I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); - /* Enable in IER... 
*/ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ - i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); + dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; } /* @@ -1435,16 +1416,41 @@ int i915_driver_irq_postinstall(struct drm_device *dev) } I915_WRITE(EMR, error_mask); - /* Disable pipe interrupt enables, clear pending pipe status */ - I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); - I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); - /* Clear pending interrupt status */ - I915_WRITE(IIR, I915_READ(IIR)); - - I915_WRITE(IER, enable_mask); I915_WRITE(IMR, dev_priv->irq_mask_reg); + I915_WRITE(IER, enable_mask); (void) I915_READ(IER); + if (I915_HAS_HOTPLUG(dev)) { + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); + + /* Note HDMI and DP share bits */ + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) + hotplug_en |= HDMID_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { + hotplug_en |= CRT_HOTPLUG_INT_EN; + + /* Programming the CRT detection parameters tends + to generate a spurious hotplug event about three + seconds later. So just do it once. + */ + if (IS_G4X(dev)) + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + } + + /* Ignore TV since it's buggy */ + + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + } + opregion_enable_asle(dev); return 0; diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 7cc8410239cb..8fcc75c1aa28 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c @@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; struct drm_connector *connector; + acpi_handle handle; + struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; + unsigned long long device_id; + acpi_status status; int i = 0; + handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) + return; + + if (acpi_is_video_device(acpi_dev)) + acpi_video_bus = acpi_dev; + else { + list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { + if (acpi_is_video_device(acpi_cdev)) { + acpi_video_bus = acpi_cdev; + break; + } + } + } + + if (!acpi_video_bus) { + printk(KERN_WARNING "No ACPI video bus found\n"); + return; + } + + list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { + if (i >= 8) { + dev_printk (KERN_ERR, &dev->pdev->dev, + "More than 8 outputs detected\n"); + return; + } + status = + acpi_evaluate_integer(acpi_cdev->handle, "_ADR", + NULL, &device_id); + if (ACPI_SUCCESS(status)) { + if (!device_id) + goto blind_set; + opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); + i++; + } + } + +end: + /* If fewer than 8 outputs, the list must be null terminated */ + if (i < 8) + opregion->acpi->didl[i] = 0; + return; + +blind_set: + i = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { int output_type = ACPI_OTHER_OUTPUT; if (i >= 8) { @@ -416,10 
+465,7 @@ static void intel_didl_outputs(struct drm_device *dev) opregion->acpi->didl[i] |= (1<<31) | output_type | i; i++; } - - /* If fewer than 8 outputs, the list must be null terminated */ - if (i < 8) - opregion->acpi->didl[i] = 0; + goto end; } int intel_opregion_init(struct drm_device *dev, int resume) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3d59862c7ccd..cf41c672defe 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -178,6 +178,7 @@ #define MI_OVERLAY_OFF (0x2<<21) #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) +#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ @@ -230,6 +231,16 @@ #define ASYNC_FLIP (1<<22) #define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_B (1<<20) +#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) +#define PIPE_CONTROL_QW_WRITE (1<<14) +#define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_WC_FLUSH (1<<12) +#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ +#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ +#define PIPE_CONTROL_ISP_DIS (1<<9) +#define PIPE_CONTROL_NOTIFY (1<<8) +#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ +#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ /* * Fence registers @@ -241,7 +252,7 @@ #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) -#define I915_FENCE_MAX_PITCH_VAL 0x10 +#define I915_FENCE_MAX_PITCH_VAL 4 #define I830_FENCE_MAX_PITCH_VAL 6 #define I830_FENCE_MAX_SIZE_VAL (1<<8) @@ -298,6 +309,10 @@ #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 + +#define MI_MODE 0x0209c +# define VS_TIMER_DISPATCH (1 << 6) + #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 @@ -320,6 +335,7 @@ #define I915_DEBUG_INTERRUPT (1<<2) #define I915_USER_INTERRUPT (1<<1) #define I915_ASLE_INTERRUPT (1<<0) +#define I915_BSD_USER_INTERRUPT (1<<25) #define EIR 0x020b0 #define EMR 0x020b4 #define ESR 0x020b8 @@ -343,6 +359,70 @@ #define LM_BURST_LENGTH 0x00000700 #define LM_FIFO_WATERMARK 0x0000001F #define MI_ARB_STATE 0x020e4 /* 915+ only */ +#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */ + +/* Make render/texture TLB fetches lower priority than associated data + * fetches. This is not turned on by default + */ +#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) + +/* Isoch request wait on GTT enable (Display A/B/C streams). + * Make isoch requests stall on the TLB update. May cause + * display underruns (test mode only) + */ +#define MI_ARB_ISOCH_WAIT_GTT (1 << 14) + +/* Block grant count for isoch requests when block count is + * set to a finite value. + */ +#define MI_ARB_BLOCK_GRANT_MASK (3 << 12) +#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ +#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ +#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ +#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ + +/* Enable render writes to complete in C2/C3/C4 power states. + * If this isn't enabled, render writes are prevented in low + * power states. That seems bad to me. + */ +#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) + +/* This acknowledges an async flip immediately instead + * of waiting for 2 TLB fetches.
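/*
 * [Editor's illustrative sketch -- not part of the patch.] MI_ARB_STATE
 * is a masked-write register: the upper 16 bits select which of the
 * lower bits a write may modify (MI_ARB_MASK_SHIFT above), which is why
 * the GEN3 fixup in i915_gem_load() writes "bit | (bit << 16)".
 */
#include <stdint.h>

#define ARB_MASK_SHIFT 16	/* mirrors MI_ARB_MASK_SHIFT */

static inline uint32_t masked_bit_enable(uint32_t bit)
{
	return bit | (bit << ARB_MASK_SHIFT);	/* set value + enable bits */
}

static inline uint32_t masked_bit_disable(uint32_t bit)
{
	return bit << ARB_MASK_SHIFT;	/* enable bit with value 0 */
}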
+ */ +#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) + +/* Enables non-sequential data reads through arbiter + */ +#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) + +/* Disable FSB snooping of cacheable write cycles from binner/render + * command stream + */ +#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) + +/* Arbiter time slice for non-isoch streams */ +#define MI_ARB_TIME_SLICE_MASK (7 << 5) +#define MI_ARB_TIME_SLICE_1 (0 << 5) +#define MI_ARB_TIME_SLICE_2 (1 << 5) +#define MI_ARB_TIME_SLICE_4 (2 << 5) +#define MI_ARB_TIME_SLICE_6 (3 << 5) +#define MI_ARB_TIME_SLICE_8 (4 << 5) +#define MI_ARB_TIME_SLICE_10 (5 << 5) +#define MI_ARB_TIME_SLICE_14 (6 << 5) +#define MI_ARB_TIME_SLICE_16 (7 << 5) + +/* Low priority grace period page size */ +#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ +#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) + +/* Disable display A/B trickle feed */ +#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) + +/* Set display plane priority */ +#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ +#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ + #define CACHE_MODE_0 0x02120 /* 915+ only */ #define CM0_MASK_SHIFT 16 #define CM0_IZ_OPT_DISABLE (1<<6) @@ -353,7 +433,40 @@ #define CM0_RC_OP_FLUSH_DISABLE (1<<0) #define BB_ADDR 0x02140 /* 8 bytes */ #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ +#define ECOSKPD 0x021d0 +#define ECO_GATING_CX_ONLY (1<<3) +#define ECO_FLIP_DONE (1<<0) + +/* GEN6 interrupt control */ +#define GEN6_RENDER_HWSTAM 0x2098 +#define GEN6_RENDER_IMR 0x20a8 +#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) +#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) +#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) +#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) +#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) +#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) +#define GEN6_RENDER_SYNC_STATUS (1 << 2) +#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) +#define GEN6_RENDER_USER_INTERRUPT (1 << 0) + +#define GEN6_BLITTER_HWSTAM 0x22098 +#define GEN6_BLITTER_IMR 0x220a8 +#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) +#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) +#define GEN6_BLITTER_SYNC_STATUS (1 << 24) +#define GEN6_BLITTER_USER_INTERRUPT (1 << 22) +/* + * BSD (bit stream decoder) instruction and interrupt control register defines + * (G4X and Ironlake only) + */ +#define BSD_RING_TAIL 0x04030 +#define BSD_RING_HEAD 0x04034 +#define BSD_RING_START 0x04038 +#define BSD_RING_CTL 0x0403c +#define BSD_RING_ACTHD 0x04074 +#define BSD_HWS_PGA 0x04080 /* * Framebuffer compression (915+ only) @@ -366,7 +479,7 @@ #define FBC_CTL_PERIODIC (1<<30) #define FBC_CTL_INTERVAL_SHIFT (16) #define FBC_CTL_UNCOMPRESSIBLE (1<<14) -#define FBC_C3_IDLE (1<<13) +#define FBC_CTL_C3_IDLE (1<<13) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO (1<<0) #define FBC_COMMAND 0x0320c @@ -791,6 +904,10 @@ #define DCC_CHANNEL_XOR_DISABLE (1 << 10) #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) +/** Pineview MCH register contains DDR3 setting */ +#define CSHRDDR3CTL 0x101a8 +#define CSHRDDR3CTL_DDR3 (1 << 2) + /** 965 MCH register controlling DRAM channel configuration */ #define C0DRB3 0x10206 #define C1DRB3 0x10606 @@ -812,6 +929,12 @@ #define CLKCFG_MEM_800 (3 << 4) #define CLKCFG_MEM_MASK (7 << 4) +#define TR1 0x11006 +#define TSFS 0x11020 +#define TSFS_SLOPE_MASK 0x0000ff00 +#define TSFS_SLOPE_SHIFT 8 +#define TSFS_INTR_MASK 0x000000ff + #define CRSTANDVID 0x11100 #define PXVFREQ_BASE
0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ #define PXVFREQ_PX_MASK 0x7f000000 @@ -950,6 +1073,41 @@ #define MEMSTAT_SRC_CTL_STDBY 3 #define RCPREVBSYTUPAVG 0x113b8 #define RCPREVBSYTDNAVG 0x113bc +#define SDEW 0x1124c +#define CSIEW0 0x11250 +#define CSIEW1 0x11254 +#define CSIEW2 0x11258 +#define PEW 0x1125c +#define DEW 0x11270 +#define MCHAFE 0x112c0 +#define CSIEC 0x112e0 +#define DMIEC 0x112e4 +#define DDREC 0x112e8 +#define PEG0EC 0x112ec +#define PEG1EC 0x112f0 +#define GFXEC 0x112f4 +#define RPPREVBSYTUPAVG 0x113b8 +#define RPPREVBSYTDNAVG 0x113bc +#define ECR 0x11600 +#define ECR_GPFE (1<<31) +#define ECR_IMONE (1<<30) +#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ +#define OGW0 0x11608 +#define OGW1 0x1160c +#define EG0 0x11610 +#define EG1 0x11614 +#define EG2 0x11618 +#define EG3 0x1161c +#define EG4 0x11620 +#define EG5 0x11624 +#define EG6 0x11628 +#define EG7 0x1162c +#define PXW 0x11664 +#define PXWL 0x11680 +#define LCFUSE02 0x116c0 +#define LCFUSE_HIV_MASK 0x000000ff +#define CSIPLL0 0x12c10 +#define DDRMPLL1 0X12c20 #define PEG_BAND_GAP_DATA 0x14d68 /* @@ -1040,8 +1198,6 @@ #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) -#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ -#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f #define PORT_HOTPLUG_STAT 0x61114 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) @@ -1750,6 +1906,14 @@ #define DP_LINK_TRAIN_MASK (3 << 28) #define DP_LINK_TRAIN_SHIFT 28 +/* CPT Link training mode */ +#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) +#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) +#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) +#define DP_LINK_TRAIN_OFF_CPT (3 << 8) +#define DP_LINK_TRAIN_MASK_CPT (7 << 8) +#define DP_LINK_TRAIN_SHIFT_CPT 8 + /* Signal voltages. 
These are mostly controlled by the other end */ #define DP_VOLTAGE_0_4 (0 << 25) #define DP_VOLTAGE_0_6 (1 << 25) @@ -1910,7 +2074,10 @@ /* Display & cursor control */ /* dithering flag on Ironlake */ -#define PIPE_ENABLE_DITHER (1 << 4) +#define PIPE_ENABLE_DITHER (1 << 4) +#define PIPE_DITHER_TYPE_MASK (3 << 2) +#define PIPE_DITHER_TYPE_SPATIAL (0 << 2) +#define PIPE_DITHER_TYPE_ST01 (1 << 2) /* Pipe A */ #define PIPEADSL 0x70000 #define PIPEACONF 0x70008 @@ -1974,15 +2141,24 @@ #define DSPFW1 0x70034 #define DSPFW_SR_SHIFT 23 +#define DSPFW_SR_MASK (0x1ff<<23) #define DSPFW_CURSORB_SHIFT 16 +#define DSPFW_CURSORB_MASK (0x3f<<16) #define DSPFW_PLANEB_SHIFT 8 +#define DSPFW_PLANEB_MASK (0x7f<<8) +#define DSPFW_PLANEA_MASK (0x7f) #define DSPFW2 0x70038 #define DSPFW_CURSORA_MASK 0x00003f00 #define DSPFW_CURSORA_SHIFT 8 +#define DSPFW_PLANEC_MASK (0x7f) #define DSPFW3 0x7003c #define DSPFW_HPLL_SR_EN (1<<31) #define DSPFW_CURSOR_SR_SHIFT 24 #define PINEVIEW_SELF_REFRESH_EN (1<<30) +#define DSPFW_CURSOR_SR_MASK (0x3f<<24) +#define DSPFW_HPLL_CURSOR_SHIFT 16 +#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) +#define DSPFW_HPLL_SR_MASK (0x1ff) /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 @@ -2009,6 +2185,43 @@ #define PINEVIEW_CURSOR_DFT_WM 0 #define PINEVIEW_CURSOR_GUARD_WM 5 + +/* define the Watermark register on Ironlake */ +#define WM0_PIPEA_ILK 0x45100 +#define WM0_PIPE_PLANE_MASK (0x7f<<16) +#define WM0_PIPE_PLANE_SHIFT 16 +#define WM0_PIPE_SPRITE_MASK (0x3f<<8) +#define WM0_PIPE_SPRITE_SHIFT 8 +#define WM0_PIPE_CURSOR_MASK (0x1f) + +#define WM0_PIPEB_ILK 0x45104 +#define WM1_LP_ILK 0x45108 +#define WM1_LP_SR_EN (1<<31) +#define WM1_LP_LATENCY_SHIFT 24 +#define WM1_LP_LATENCY_MASK (0x7f<<24) +#define WM1_LP_SR_MASK (0x1ff<<8) +#define WM1_LP_SR_SHIFT 8 +#define WM1_LP_CURSOR_MASK (0x3f) + +/* Memory latency timer register */ +#define MLTR_ILK 0x11222 +/* the unit of memory self-refresh latency time is 0.5us */ +#define ILK_SRLT_MASK 0x3f + +/* define the fifo size on Ironlake */ +#define ILK_DISPLAY_FIFO 128 +#define ILK_DISPLAY_MAXWM 64 +#define ILK_DISPLAY_DFTWM 8 + +#define ILK_DISPLAY_SR_FIFO 512 +#define ILK_DISPLAY_MAX_SRWM 0x1ff +#define ILK_DISPLAY_DFT_SRWM 0x3f +#define ILK_CURSOR_SR_FIFO 64 +#define ILK_CURSOR_MAX_SRWM 0x3f +#define ILK_CURSOR_DFT_SRWM 8 + +#define ILK_FIFO_LINE_SIZE 64 + /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. 
The following code @@ -2172,6 +2385,14 @@ #define DISPLAY_PORT_PLL_BIOS_1 0x46010 #define DISPLAY_PORT_PLL_BIOS_2 0x46014 +#define PCH_DSPCLK_GATE_D 0x42020 +# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) +# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) + +#define PCH_3DCGDIS0 0x46020 +# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) +# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) + #define FDI_PLL_FREQ_CTL 0x46030 #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 @@ -2273,16 +2494,26 @@ #define DEIER 0x4400c /* GT interrupt */ +#define GT_PIPE_NOTIFY (1 << 4) #define GT_SYNC_STATUS (1 << 2) #define GT_USER_INTERRUPT (1 << 0) +#define GT_BSD_USER_INTERRUPT (1 << 5) + #define GTISR 0x44010 #define GTIMR 0x44014 #define GTIIR 0x44018 #define GTIER 0x4401c +#define ILK_DISPLAY_CHICKEN2 0x42004 +#define ILK_DPARB_GATE (1<<22) +#define ILK_VSDPFD_FULL (1<<21) +#define ILK_DSPCLK_GATE 0x42020 +#define ILK_DPARB_CLK_GATE (1<<5) + #define DISP_ARB_CTL 0x45000 #define DISP_TILE_SURFACE_SWIZZLING (1<<13) +#define DISP_FBC_WM_DIS (1<<15) /* PCH */ @@ -2293,6 +2524,11 @@ #define SDE_PORTB_HOTPLUG (1 << 8) #define SDE_SDVOB_HOTPLUG (1 << 6) #define SDE_HOTPLUG_MASK (0xf << 8) +/* CPT */ +#define SDE_CRT_HOTPLUG_CPT (1 << 19) +#define SDE_PORTD_HOTPLUG_CPT (1 << 23) +#define SDE_PORTC_HOTPLUG_CPT (1 << 22) +#define SDE_PORTB_HOTPLUG_CPT (1 << 21) #define SDEISR 0xc4000 #define SDEIMR 0xc4004 @@ -2384,6 +2620,17 @@ #define PCH_SSC4_PARMS 0xc6210 #define PCH_SSC4_AUX_PARMS 0xc6214 +#define PCH_DPLL_SEL 0xc7000 +#define TRANSA_DPLL_ENABLE (1<<3) +#define TRANSA_DPLLB_SEL (1<<0) +#define TRANSA_DPLLA_SEL 0 +#define TRANSB_DPLL_ENABLE (1<<7) +#define TRANSB_DPLLB_SEL (1<<4) +#define TRANSB_DPLLA_SEL (0) +#define TRANSC_DPLL_ENABLE (1<<11) +#define TRANSC_DPLLB_SEL (1<<8) +#define TRANSC_DPLLA_SEL (0) + /* transcoder */ #define TRANS_HTOTAL_A 0xe0000 @@ -2470,6 +2717,19 @@ #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) +/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level. SNB has different settings.
*/ +/* SNB A-stepping */ +#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) +#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) +#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) +#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) +/* SNB B-stepping */ +#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) +#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) +#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) +#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) +#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) #define FDI_DP_PORT_WIDTH_X1 (0<<19) #define FDI_DP_PORT_WIDTH_X2 (1<<19) #define FDI_DP_PORT_WIDTH_X3 (2<<19) @@ -2502,6 +2762,13 @@ #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) #define FDI_SEL_RAWCLK (0<<4) #define FDI_SEL_PCDCLK (1<<4) +/* CPT */ +#define FDI_AUTO_TRAINING (1<<10) +#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) +#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8) +#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) +#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) +#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) #define FDI_RXA_MISC 0xf0010 #define FDI_RXB_MISC 0xf1010 @@ -2567,12 +2834,18 @@ #define SDVO_ENCODING (0) #define TMDS_ENCODING (2 << 10) #define NULL_PACKET_VSYNC_ENABLE (1 << 9) +/* CPT */ +#define HDMI_MODE_SELECT (1 << 9) +#define DVI_MODE_SELECT (0) #define SDVOB_BORDER_ENABLE (1 << 7) #define AUDIO_ENABLE (1 << 6) #define VSYNC_ACTIVE_HIGH (1 << 4) #define HSYNC_ACTIVE_HIGH (1 << 3) #define PORT_DETECTED (1 << 2) +/* PCH SDVOB multiplex with HDMIB */ +#define PCH_SDVOB HDMIB + #define HDMIC 0xe1150 #define HDMID 0xe1160 @@ -2596,6 +2869,7 @@ #define PCH_PP_STATUS 0xc7200 #define PCH_PP_CONTROL 0xc7204 +#define PANEL_UNLOCK_REGS (0xabcd << 16) #define EDP_FORCE_VDD (1 << 3) #define EDP_BLC_ENABLE (1 << 2) #define PANEL_POWER_RESET (1 << 1) @@ -2630,4 +2904,42 @@ #define PCH_DPD_AUX_CH_DATA4 0xe4320 #define PCH_DPD_AUX_CH_DATA5 0xe4324 +/* CPT */ +#define PORT_TRANS_A_SEL_CPT 0 +#define PORT_TRANS_B_SEL_CPT (1<<29) +#define PORT_TRANS_C_SEL_CPT (2<<29) +#define PORT_TRANS_SEL_MASK (3<<29) + +#define TRANS_DP_CTL_A 0xe0300 +#define TRANS_DP_CTL_B 0xe1300 +#define TRANS_DP_CTL_C 0xe2300 +#define TRANS_DP_OUTPUT_ENABLE (1<<31) +#define TRANS_DP_PORT_SEL_B (0<<29) +#define TRANS_DP_PORT_SEL_C (1<<29) +#define TRANS_DP_PORT_SEL_D (2<<29) +#define TRANS_DP_PORT_SEL_MASK (3<<29) +#define TRANS_DP_AUDIO_ONLY (1<<26) +#define TRANS_DP_ENH_FRAMING (1<<18) +#define TRANS_DP_8BPC (0<<9) +#define TRANS_DP_10BPC (1<<9) +#define TRANS_DP_6BPC (2<<9) +#define TRANS_DP_12BPC (3<<9) +#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) +#define TRANS_DP_VSYNC_ACTIVE_LOW 0 +#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) +#define TRANS_DP_HSYNC_ACTIVE_LOW 0 + +/* SNB eDP training params */ +/* SNB A-stepping */ +#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) +#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) +#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) +#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) +/* SNB B-stepping */ +#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) +#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) +#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) +#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) +#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index ac0d1a73ac22..60a5800fba6e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev) } /* FIXME: save TV & SDVO state */ 
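The i915_suspend.c hunk below stops saving and restoring FBC registers on hardware that has no FBC unit, by wrapping both paths in an I915_HAS_FBC() check. A minimal sketch of the resulting save path, assuming the I915_READ() accessor and the save* fields of struct drm_i915_private as they appear in this patch (i915_save_fbc is a hypothetical helper name; the patch keeps this logic inline in i915_save_display()):

static void i915_save_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev))
		return;	/* platform has no FBC unit: nothing to save */

	if (IS_GM45(dev)) {
		/* GM45 keeps its compressed-buffer base in the DPFC unit */
		dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
	} else {
		/* older parts use the 8xx-style FBC registers */
		dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
		dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
		dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
		dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
	}
}

The restore side mirrors the same guard-then-branch shape, disabling FBC before writing the saved values back.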
- /* FBC state */ - if (IS_GM45(dev)) { - dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); - } else { - dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); - dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); - dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); - dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); + /* Only save FBC state on the platform that supports FBC */ + if (I915_HAS_FBC(dev)) { + if (IS_GM45(dev)) { + dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); + } else { + dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); + dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); + dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); + dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); + } } /* VGA state */ @@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev) } /* FIXME: restore TV & SDVO state */ - /* FBC info */ - if (IS_GM45(dev)) { - g4x_disable_fbc(dev); - I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); - } else { - i8xx_disable_fbc(dev); - I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); - I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); - I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); - I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); + /* Only restore FBC info on the platform that supports FBC */ + if (I915_HAS_FBC(dev)) { + if (IS_GM45(dev)) { + g4x_disable_fbc(dev); + I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); + } else { + i8xx_disable_fbc(dev); + I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); + I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); + I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); + I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); + } } - /* VGA state */ if (IS_IRONLAKE(dev)) I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 01840d9bc38f..fab21760dd57 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind, __entry->obj, __entry->gtt_offset) ); -TRACE_EVENT(i915_gem_object_clflush, - - TP_PROTO(struct drm_gem_object *obj), - - TP_ARGS(obj), - - TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) - ), - - TP_fast_assign( - __entry->obj = obj; - ), - - TP_printk("obj=%p", __entry->obj) -); - TRACE_EVENT(i915_gem_object_change_domain, TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), @@ -115,7 +98,7 @@ TRACE_EVENT(i915_gem_object_get_fence, __entry->obj, __entry->fence, __entry->tiling_mode) ); -TRACE_EVENT(i915_gem_object_unbind, +DECLARE_EVENT_CLASS(i915_gem_object, TP_PROTO(struct drm_gem_object *obj), @@ -132,21 +115,25 @@ TRACE_EVENT(i915_gem_object_unbind, TP_printk("obj=%p", __entry->obj) ); -TRACE_EVENT(i915_gem_object_destroy, +DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, TP_PROTO(struct drm_gem_object *obj), - TP_ARGS(obj), + TP_ARGS(obj) +); - TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) - ), +DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, - TP_fast_assign( - __entry->obj = obj; - ), + TP_PROTO(struct drm_gem_object *obj), - TP_printk("obj=%p", __entry->obj) + TP_ARGS(obj) +); + +DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, + + TP_PROTO(struct drm_gem_object *obj), + + TP_ARGS(obj) ); /* batch tracing */ @@ -197,8 +184,7 @@ TRACE_EVENT(i915_gem_request_flush, __entry->flush_domains, __entry->invalidate_domains) ); - -TRACE_EVENT(i915_gem_request_complete, +DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno), @@ -217,64 +203,35 @@ TRACE_EVENT(i915_gem_request_complete, TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ); -TRACE_EVENT(i915_gem_request_retire, +DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, TP_PROTO(struct drm_device *dev, u32 seqno), - TP_ARGS(dev, seqno), - - TP_STRUCT__entry( - __field(u32, dev) - __field(u32, seqno) - ), - - TP_fast_assign( - __entry->dev = dev->primary->index; - __entry->seqno = seqno; - ), - - TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) + TP_ARGS(dev, seqno) ); -TRACE_EVENT(i915_gem_request_wait_begin, +DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, TP_PROTO(struct drm_device *dev, u32 seqno), - TP_ARGS(dev, seqno), - - TP_STRUCT__entry( - __field(u32, dev) - __field(u32, seqno) - ), - - TP_fast_assign( - __entry->dev = dev->primary->index; - __entry->seqno = seqno; - ), - - TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) + TP_ARGS(dev, seqno) ); -TRACE_EVENT(i915_gem_request_wait_end, +DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, TP_PROTO(struct drm_device *dev, u32 seqno), - TP_ARGS(dev, seqno), + TP_ARGS(dev, seqno) +); - TP_STRUCT__entry( - __field(u32, dev) - __field(u32, seqno) - ), +DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, - TP_fast_assign( - __entry->dev = dev->primary->index; - __entry->seqno = seqno; - ), + TP_PROTO(struct drm_device *dev, u32 seqno), - TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) + TP_ARGS(dev, seqno) ); -TRACE_EVENT(i915_ring_wait_begin, +DECLARE_EVENT_CLASS(i915_ring, TP_PROTO(struct drm_device *dev), @@ -291,26 +248,23 @@ TRACE_EVENT(i915_ring_wait_begin, TP_printk("dev=%u", __entry->dev) ); -TRACE_EVENT(i915_ring_wait_end, +DEFINE_EVENT(i915_ring, i915_ring_wait_begin, TP_PROTO(struct drm_device *dev), - TP_ARGS(dev), + TP_ARGS(dev) +); - TP_STRUCT__entry( - __field(u32, dev) - ), +DEFINE_EVENT(i915_ring, i915_ring_wait_end, - TP_fast_assign( - __entry->dev = dev->primary->index; - ), + TP_PROTO(struct drm_device *dev), - TP_printk("dev=%u", __entry->dev) + TP_ARGS(dev) ); #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 +#define TRACE_INCLUDE_PATH . 
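The i915_trace.h rewrite above collapses tracepoints that differed only in name into DECLARE_EVENT_CLASS()/DEFINE_EVENT() pairs: the class carries the prototype, record layout, assignment, and print format once, and each concrete event shrinks to a two-line definition. A minimal sketch of the idiom with a hypothetical event class (foo_request and its two events are illustrative, not part of this patch):

DECLARE_EVENT_CLASS(foo_request,

	TP_PROTO(struct drm_device *dev, u32 seqno),

	TP_ARGS(dev, seqno),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->seqno = seqno;
	),

	TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);

/* Each DEFINE_EVENT still emits a distinct named tracepoint... */
DEFINE_EVENT(foo_request, foo_request_queue,
	TP_PROTO(struct drm_device *dev, u32 seqno),
	TP_ARGS(dev, seqno)
);

/* ...but the entry struct and print logic are generated once per class. */
DEFINE_EVENT(foo_request, foo_request_retire,
	TP_PROTO(struct drm_device *dev, u32 seqno),
	TP_ARGS(dev, seqno)
);

The TRACE_INCLUDE_PATH change just above is related housekeeping: pointing it at "." lets define_trace.h locate this header, presumably via an include-path flag set elsewhere in the driver build (not shown in this section), instead of a fragile relative path.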
#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 70c9d4ba7042..96f75d7f6633 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, panel_fixed_mode->clock = dvo_timing->clock * 10; panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; + if (dvo_timing->hsync_positive) + panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; + else + panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; + + if (dvo_timing->vsync_positive) + panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; + else + panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; + /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; @@ -366,6 +376,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, p_mapping->dvo_port = p_child->dvo_port; p_mapping->slave_addr = p_child->slave_addr; p_mapping->dvo_wiring = p_child->dvo_wiring; + p_mapping->ddc_pin = p_child->ddc_pin; p_mapping->initialized = 1; } else { DRM_DEBUG_KMS("Maybe one SDVO port is shared by " @@ -417,8 +428,9 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) edp = find_section(bdb, BDB_EDP); if (!edp) { if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { - DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\ - assume 18bpp panel color depth.\n"); + DRM_DEBUG_KMS("No eDP BDB found but eDP panel " + "supported, assume 18bpp panel color " + "depth.\n"); dev_priv->edp_bpp = 18; } return; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index fccf07470c8f..ee0732b222a1 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -25,6 +25,7 @@ */ #include <linux/i2c.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" @@ -135,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, adpa |= ADPA_VSYNC_ACTIVE_HIGH; if (intel_crtc->pipe == 0) { - adpa |= ADPA_PIPE_A_SELECT; + if (HAS_PCH_CPT(dev)) + adpa |= PORT_TRANS_A_SEL_CPT; + else + adpa |= ADPA_PIPE_A_SELECT; if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_A, 0); } else { - adpa |= ADPA_PIPE_B_SELECT; + if (HAS_PCH_CPT(dev)) + adpa |= PORT_TRANS_B_SEL_CPT; + else + adpa |= ADPA_PIPE_B_SELECT; if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_B, 0); } @@ -151,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 adpa; + u32 adpa, temp; bool ret; - adpa = I915_READ(PCH_ADPA); + temp = adpa = I915_READ(PCH_ADPA); - adpa &= ~ADPA_CRT_HOTPLUG_MASK; - /* disable HPD first */ - I915_WRITE(PCH_ADPA, adpa); - (void)I915_READ(PCH_ADPA); + if (HAS_PCH_CPT(dev)) { + /* Disable DAC before force detect */ + I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE); + (void)I915_READ(PCH_ADPA); + } else { + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + /* disable HPD first */ + I915_WRITE(PCH_ADPA, adpa); + (void)I915_READ(PCH_ADPA); + } adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | ADPA_CRT_HOTPLUG_WARMUP_10MS | @@ -175,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) ; + if (HAS_PCH_CPT(dev)) { + I915_WRITE(PCH_ADPA, temp); + (void)I915_READ(PCH_ADPA); + } + /* Check the status to see if both blue and 
green are on now */ adpa = I915_READ(PCH_ADPA); adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; @@ -199,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 hotplug_en; + u32 hotplug_en, orig, stat; + bool ret = false; int i, tries = 0; if (HAS_PCH_SPLIT(dev)) @@ -214,15 +233,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) tries = 2; else tries = 1; - hotplug_en = I915_READ(PORT_HOTPLUG_EN); - hotplug_en &= CRT_FORCE_HOTPLUG_MASK; + hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; - if (IS_G4X(dev)) - hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; - - hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; - for (i = 0; i < tries ; i++) { unsigned long timeout; /* turn on the FORCE_DETECT */ @@ -237,28 +250,34 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) } while (time_after(timeout, jiffies)); } - if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != - CRT_HOTPLUG_MONITOR_NONE) - return true; + stat = I915_READ(PORT_HOTPLUG_STAT); + if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) + ret = true; - return false; + /* clear the interrupt we just generated, if any */ + I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); + + /* and put the bits back */ + I915_WRITE(PORT_HOTPLUG_EN, orig); + + return ret; } -static bool intel_crt_detect_ddc(struct drm_connector *connector) +static bool intel_crt_detect_ddc(struct drm_encoder *encoder) { - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); /* CRT should always be at 0, but check anyway */ - if (intel_output->type != INTEL_OUTPUT_ANALOG) + if (intel_encoder->type != INTEL_OUTPUT_ANALOG) return false; - return intel_ddc_probe(intel_output); + return intel_ddc_probe(intel_encoder); } static enum drm_connector_status -intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) +intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) { - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = &intel_encoder->enc; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -386,8 +405,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct drm_crtc *crtc; int dpms_mode; enum drm_connector_status status; @@ -399,18 +418,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto return connector_status_disconnected; } - if (intel_crt_detect_ddc(connector)) + if (intel_crt_detect_ddc(encoder)) return connector_status_connected; /* for pre-945g platforms use load detect */ if (encoder->crtc && encoder->crtc->enabled) { - status = intel_crt_load_detect(encoder->crtc, intel_output); + status = intel_crt_load_detect(encoder->crtc, intel_encoder); } else { - crtc = intel_get_load_detect_pipe(intel_output, + crtc = 
intel_get_load_detect_pipe(intel_encoder, connector, NULL, &dpms_mode); if (crtc) { - status = intel_crt_load_detect(crtc, intel_output); - intel_release_load_detect_pipe(intel_output, dpms_mode); + status = intel_crt_load_detect(crtc, intel_encoder); + intel_release_load_detect_pipe(intel_encoder, + connector, dpms_mode); } else status = connector_status_unknown; } @@ -420,9 +440,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto static void intel_crt_destroy(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - - intel_i2c_destroy(intel_output->ddc_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -431,29 +448,27 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { int ret; - struct intel_output *intel_output = to_intel_output(connector); - struct i2c_adapter *ddcbus; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct i2c_adapter *ddc_bus; struct drm_device *dev = connector->dev; - ret = intel_ddc_get_modes(intel_output); + ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); if (ret || !IS_G4X(dev)) goto end; - ddcbus = intel_output->ddc_bus; /* Try to probe digital port for output in DVI-I -> VGA mode. */ - intel_output->ddc_bus = - intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); + ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); - if (!intel_output->ddc_bus) { - intel_output->ddc_bus = ddcbus; + if (!ddc_bus) { dev_printk(KERN_ERR, &connector->dev->pdev->dev, "DDC bus registration failed for CRTDDC_D.\n"); goto end; } /* Try to get modes by GPIOD port */ - ret = intel_ddc_get_modes(intel_output); - intel_i2c_destroy(ddcbus); + ret = intel_ddc_get_modes(connector, ddc_bus); + intel_i2c_destroy(ddc_bus); end: return ret; @@ -490,12 +505,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, - .best_encoder = intel_best_encoder, + .best_encoder = intel_attached_encoder, }; static void intel_crt_enc_destroy(struct drm_encoder *encoder) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + intel_i2c_destroy(intel_encoder->ddc_bus); drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static const struct drm_encoder_funcs intel_crt_enc_funcs = { @@ -505,23 +524,30 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { void intel_crt_init(struct drm_device *dev) { struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; u32 i2c_reg; - intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); - if (!intel_output) + intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); + if (!intel_encoder) return; - connector = &intel_output->base; - drm_connector_init(dev, &intel_output->base, + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_encoder); + return; + } + + connector = &intel_connector->base; + drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - drm_encoder_init(dev, 
&intel_output->enc, &intel_crt_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - drm_mode_connector_attach_encoder(&intel_output->base, - &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); /* Set up the DDC bus. */ if (HAS_PCH_SPLIT(dev)) @@ -532,25 +558,30 @@ void intel_crt_init(struct drm_device *dev) if (dev_priv->crt_ddc_bus != 0) i2c_reg = dev_priv->crt_ddc_bus; } - intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); - if (!intel_output->ddc_bus) { + intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); + if (!intel_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); return; } - intel_output->type = INTEL_OUTPUT_ANALOG; - intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + intel_encoder->type = INTEL_OUTPUT_ANALOG; + intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT); - intel_output->crtc_mask = (1 << 0) | (1 << 1); - connector->interlace_allowed = 0; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); + if (I915_HAS_HOTPLUG(dev)) + connector->polled = DRM_CONNECTOR_POLL_HPD; + else + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9cd6de5f9906..5e21b3119824 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -28,6 +28,7 @@ #include <linux/input.h> #include <linux/i2c.h> #include <linux/kernel.h> +#include <linux/slab.h> #include "drmP.h" #include "intel_drv.h" #include "i915_drm.h" @@ -741,36 +742,18 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *l_entry; + struct drm_encoder *l_entry; - list_for_each_entry(l_entry, &mode_config->connector_list, head) { - if (l_entry->encoder && - l_entry->encoder->crtc == crtc) { - struct intel_output *intel_output = to_intel_output(l_entry); - if (intel_output->type == type) + list_for_each_entry(l_entry, &mode_config->encoder_list, head) { + if (l_entry && l_entry->crtc == crtc) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); + if (intel_encoder->type == type) return true; } } return false; } -struct drm_connector * -intel_pipe_get_output (struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *l_entry, *ret = NULL; - - list_for_each_entry(l_entry, &mode_config->connector_list, head) { - if (l_entry->encoder && - l_entry->encoder->crtc == crtc) { - ret = l_entry; - break; - } - } - return ret; -} - #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) /** * Returns whether the given set of divisors are valid for a given refclk with @@ -879,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, intel_clock_t clock; int max_n; bool found; - /* approximately equals 
target * 0.00488 */ - int err_most = (target >> 8) + (target >> 10); + /* approximately equals target * 0.00585 */ + int err_most = (target >> 8) + (target >> 9); found = false; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { @@ -904,9 +887,9 @@ memset(best_clock, 0, sizeof(*best_clock)); max_n = limit->n.max; - /* based on hardware requriment prefer smaller n to precision */ + /* based on hardware requirement, prefer smaller n to precision */ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { - /* based on hardware requirment prefere larger m1,m2 */ + /* based on hardware requirement, prefer larger m1,m2 */ for (clock.m1 = limit->m1.max; clock.m1 >= limit->m1.min; clock.m1--) { for (clock.m2 = limit->m2.max; @@ -1002,7 +985,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane, i; u32 fbc_ctl, fbc_ctl2; @@ -1032,7 +1015,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* enable it... */ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; if (IS_I945GM(dev)) - fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; if (obj_priv->tiling_mode != I915_TILING_NONE) @@ -1046,28 +1029,36 @@ void i8xx_disable_fbc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long timeout = jiffies + msecs_to_jiffies(1); u32 fbc_ctl; if (!I915_HAS_FBC(dev)) return; + if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) + return; /* Already off, just return */ + /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); fbc_ctl &= ~FBC_CTL_EN; I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) - ; /* nothing */ + while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) { + if (time_after(jiffies, timeout)) { + DRM_DEBUG_DRIVER("FBC idle timed out\n"); + break; + } + ; /* do nothing */ + } intel_wait_for_vblank(dev); DRM_DEBUG_KMS("disabled FBC\n"); } -static bool i8xx_fbc_enabled(struct drm_crtc *crtc) +static bool i8xx_fbc_enabled(struct drm_device *dev) { - struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; return I915_READ(FBC_CONTROL) & FBC_CTL_EN; @@ -1079,7 +1070,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = (intel_crtc->plane == 0 ?
DPFC_CTL_PLANEA : DPFC_CTL_PLANEB); @@ -1124,14 +1115,43 @@ void g4x_disable_fbc(struct drm_device *dev) DRM_DEBUG_KMS("disabled FBC\n"); } -static bool g4x_fbc_enabled(struct drm_crtc *crtc) +static bool g4x_fbc_enabled(struct drm_device *dev) { - struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; } +bool intel_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.fbc_enabled) + return false; + + return dev_priv->display.fbc_enabled(dev); +} + +void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + + if (!dev_priv->display.enable_fbc) + return; + + dev_priv->display.enable_fbc(crtc, interval); +} + +void intel_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.disable_fbc) + return; + + dev_priv->display.disable_fbc(dev); +} + /** * intel_update_fbc - enable/disable FBC as needed * @crtc: CRTC to point the compressor at @@ -1160,31 +1180,44 @@ static void intel_update_fbc(struct drm_crtc *crtc, struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj_priv; + struct drm_crtc *tmp_crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane; + int crtcs_enabled = 0; + + DRM_DEBUG_KMS("\n"); if (!i915_powersave) return; - if (!dev_priv->display.fbc_enabled || - !dev_priv->display.enable_fbc || - !dev_priv->display.disable_fbc) + if (!I915_HAS_FBC(dev)) return; if (!crtc->fb) return; intel_fb = to_intel_framebuffer(fb); - obj_priv = intel_fb->obj->driver_private; + obj_priv = to_intel_bo(intel_fb->obj); /* * If FBC is already on, we just have to verify that we can * keep it that way... * Need to disable if: + * - more than one pipe is active * - changing FBC params (stride, fence, mode) * - new fb is too large to fit in compressed buffer * - going to an unsupported config (interlace, pixel multiply, etc.) 
*/ + list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { + if (tmp_crtc->enabled) + crtcs_enabled++; + } + DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); + if (crtcs_enabled > 1) { + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + goto out_disable; + } if (intel_fb->obj->size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " "compression\n"); @@ -1215,34 +1248,32 @@ static void intel_update_fbc(struct drm_crtc *crtc, goto out_disable; } - if (dev_priv->display.fbc_enabled(crtc)) { + if (intel_fbc_enabled(dev)) { /* We can re-enable it in this case, but need to update pitch */ - if (fb->pitch > dev_priv->cfb_pitch) - dev_priv->display.disable_fbc(dev); - if (obj_priv->fence_reg != dev_priv->cfb_fence) - dev_priv->display.disable_fbc(dev); - if (plane != dev_priv->cfb_plane) - dev_priv->display.disable_fbc(dev); + if ((fb->pitch > dev_priv->cfb_pitch) || + (obj_priv->fence_reg != dev_priv->cfb_fence) || + (plane != dev_priv->cfb_plane)) + intel_disable_fbc(dev); } - if (!dev_priv->display.fbc_enabled(crtc)) { - /* Now try to turn it back on if possible */ - dev_priv->display.enable_fbc(crtc, 500); - } + /* Now try to turn it back on if possible */ + if (!intel_fbc_enabled(dev)) + intel_enable_fbc(crtc, 500); return; out_disable: - DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); /* Multiple disables should be harmless */ - if (dev_priv->display.fbc_enabled(crtc)) - dev_priv->display.disable_fbc(dev); + if (intel_fbc_enabled(dev)) { + DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); + intel_disable_fbc(dev); + } } -static int +int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 alignment; int ret; @@ -1322,7 +1353,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, intel_fb = to_intel_framebuffer(crtc->fb); obj = intel_fb->obj; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, obj); @@ -1380,7 +1411,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, Start = obj_priv->gtt_offset; Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", + Start, Offset, x, y, crtc->fb->pitch); I915_WRITE(dspstride, crtc->fb->pitch); if (IS_I965G(dev)) { I915_WRITE(dspbase, Offset); @@ -1400,7 +1432,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb) { intel_fb = to_intel_framebuffer(old_fb); - obj_priv = intel_fb->obj->driver_private; + obj_priv = to_intel_bo(intel_fb->obj); i915_gem_object_unpin(intel_fb->obj); } intel_increase_pllclock(crtc, true); @@ -1509,6 +1541,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) udelay(500); } +/* The FDI link training functions for ILK/Ibexpeak. */ +static void ironlake_fdi_link_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; + int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; + u32 temp, tries = 0; + + /* enable CPU FDI TX and PCH FDI RX */ + temp = I915_READ(fdi_tx_reg); + temp |= FDI_TX_ENABLE; + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); + I915_READ(fdi_rx_reg); + udelay(150); + + /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); + + for (;;) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if ((temp & FDI_RX_BIT_LOCK)) { + DRM_DEBUG_KMS("FDI train 1 done.\n"); + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_BIT_LOCK); + break; + } + + tries++; + + if (tries > 5) { + DRM_DEBUG_KMS("FDI train 1 fail!\n"); + break; + } + } + + /* Train 2 */ + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_tx_reg, temp); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_rx_reg, temp); + udelay(150); + + tries = 0; + + for (;;) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if (temp & FDI_RX_SYMBOL_LOCK) { + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done.\n"); + break; + } + + tries++; + + if (tries > 5) { + DRM_DEBUG_KMS("FDI train 2 fail!\n"); + break; + } + } + + DRM_DEBUG_KMS("FDI train done\n"); +} + +static int snb_b_fdi_train_param [] = { + FDI_LINK_TRAIN_400MV_0DB_SNB_B, + FDI_LINK_TRAIN_400MV_6DB_SNB_B, + FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, + FDI_LINK_TRAIN_800MV_0DB_SNB_B, +}; + +/* The FDI link training functions for SNB/Cougarpoint. */ +static void gen6_fdi_link_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; + int fdi_rx_imr_reg = (pipe == 0) ?
FDI_RXA_IMR : FDI_RXB_IMR; + u32 temp, i; + + /* enable CPU FDI TX and PCH FDI RX */ + temp = I915_READ(fdi_tx_reg); + temp |= FDI_TX_ENABLE; + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + /* SNB-B */ + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; + I915_WRITE(fdi_tx_reg, temp); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); + I915_READ(fdi_rx_reg); + udelay(150); + + /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); + + for (i = 0; i < 4; i++ ) { + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= snb_b_fdi_train_param[i]; + I915_WRITE(fdi_tx_reg, temp); + udelay(500); + + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if (temp & FDI_RX_BIT_LOCK) { + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_BIT_LOCK); + DRM_DEBUG_KMS("FDI train 1 done.\n"); + break; + } + } + if (i == 4) + DRM_DEBUG_KMS("FDI train 1 fail!\n"); + + /* Train 2 */ + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + if (IS_GEN6(dev)) { + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + /* SNB-B */ + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; + } + I915_WRITE(fdi_tx_reg, temp); + + temp = I915_READ(fdi_rx_reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + } + I915_WRITE(fdi_rx_reg, temp); + udelay(150); + + for (i = 0; i < 4; i++ ) { + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= snb_b_fdi_train_param[i]; + I915_WRITE(fdi_tx_reg, temp); + udelay(500); + + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if (temp & FDI_RX_SYMBOL_LOCK) { + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done.\n"); + break; + } + } + if (i == 4) + DRM_DEBUG_KMS("FDI train 2 fail!\n"); + + DRM_DEBUG_KMS("FDI train done.\n"); +} + static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; @@ -1522,8 +1767,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; - int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; @@ -1540,8 +1783,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; + int trans_dpll_sel = (pipe == 0) ?
0 : 1; u32 temp; - int tries = 5, j, n; + int n; u32 pipe_bpc; temp = I915_READ(pipeconf_reg); @@ -1568,12 +1812,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) /* enable eDP PLL */ ironlake_enable_pll_edp(crtc); } else { - /* enable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - } /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ temp = I915_READ(fdi_rx_reg); @@ -1583,9 +1821,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) */ temp &= ~(0x7 << 16); temp |= (pipe_bpc << 11); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | - FDI_SEL_PCDCLK | - FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + I915_READ(fdi_rx_reg); + udelay(200); + + /* Switch from Rawclk to PCDclk */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); I915_READ(fdi_rx_reg); udelay(200); @@ -1628,91 +1872,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) } if (!HAS_eDP) { - /* enable CPU FDI TX and PCH FDI RX */ - temp = I915_READ(fdi_tx_reg); - temp |= FDI_TX_ENABLE; - temp |= FDI_DP_PORT_WIDTH_X4; /* default */ - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); - - udelay(150); - - /* Train FDI. */ - /* umask FDI RX Interrupt symbol_lock and bit_lock bit - for train result */ - temp = I915_READ(fdi_rx_imr_reg); - temp &= ~FDI_RX_SYMBOL_LOCK; - temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); - udelay(150); + /* For PCH output, training FDI link */ + if (IS_GEN6(dev)) + gen6_fdi_link_train(crtc); + else + ironlake_fdi_link_train(crtc); - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - - if ((temp & FDI_RX_BIT_LOCK) == 0) { - for (j = 0; j < tries; j++) { - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", - temp); - if (temp & FDI_RX_BIT_LOCK) - break; - udelay(200); - } - if (j != tries) - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); - else - DRM_DEBUG_KMS("train 1 fail\n"); - } else { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("train 1 ok 2!\n"); + /* enable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); } - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_tx_reg, temp); - - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_rx_reg, temp); - - udelay(150); + udelay(200); - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - - if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { - for (j = 0; j < tries; j++) { - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", - temp); - if (temp & FDI_RX_SYMBOL_LOCK) - break; - udelay(200); - } - if (j != tries) { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("train 2 ok 1!\n"); - } else - DRM_DEBUG_KMS("train 2 fail\n"); - } else { - I915_WRITE(fdi_rx_iir_reg, - temp | 
FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("train 2 ok 2!\n"); + if (HAS_PCH_CPT(dev)) { + /* Be sure PCH DPLL SEL is set */ + temp = I915_READ(PCH_DPLL_SEL); + if (trans_dpll_sel == 0 && + (temp & TRANSA_DPLL_ENABLE) == 0) + temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + else if (trans_dpll_sel == 1 && + (temp & TRANSB_DPLL_ENABLE) == 0) + temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + I915_READ(PCH_DPLL_SEL); } - DRM_DEBUG_KMS("train done\n"); /* set transcoder timing */ I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); @@ -1723,6 +1908,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + /* enable normal train */ + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_TX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_rx_reg); + + /* wait one idle pattern time */ + udelay(100); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev) && + intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; + int reg; + + reg = I915_READ(trans_dp_ctl); + reg &= ~TRANS_DP_PORT_SEL_MASK; + reg = TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_ENH_FRAMING | + TRANS_DP_VSYNC_ACTIVE_HIGH | + TRANS_DP_HSYNC_ACTIVE_HIGH; + + switch (intel_trans_dp_port_sel(crtc)) { + case PCH_DP_B: + reg |= TRANS_DP_PORT_SEL_B; + break; + case PCH_DP_C: + reg |= TRANS_DP_PORT_SEL_C; + break; + case PCH_DP_D: + reg |= TRANS_DP_PORT_SEL_D; + break; + default: + DRM_DEBUG_KMS("Wrong PCH DP port return. 
Guess port B\n"); + reg |= TRANS_DP_PORT_SEL_B; + break; + } + + I915_WRITE(trans_dp_ctl, reg); + POSTING_READ(trans_dp_ctl); + } + /* enable PCH transcoder */ temp = I915_READ(transconf_reg); /* @@ -1737,23 +1976,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) ; - /* enable normal */ - - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_TX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_RX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_rx_reg); - - /* wait one idle pattern time */ - udelay(100); - } intel_crtc_load_lut(crtc); @@ -1804,6 +2026,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(pf_ctl_reg); } I915_WRITE(pf_win_size, 0); + POSTING_READ(pf_win_size); + /* disable CPU FDI tx and PCH FDI rx */ temp = I915_READ(fdi_tx_reg); @@ -1824,11 +2048,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; I915_WRITE(fdi_tx_reg, temp); + POSTING_READ(fdi_tx_reg); temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } I915_WRITE(fdi_rx_reg, temp); + POSTING_READ(fdi_rx_reg); udelay(100); @@ -1858,6 +2089,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) } } } + temp = I915_READ(transconf_reg); /* BPC in transcoder is consistent with that in pipeconf */ temp &= ~PIPE_BPC_MASK; @@ -1866,35 +2098,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(transconf_reg); udelay(100); + if (HAS_PCH_CPT(dev)) { + /* disable TRANS_DP_CTL */ + int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; + int reg; + + reg = I915_READ(trans_dp_ctl); + reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); + I915_WRITE(trans_dp_ctl, reg); + POSTING_READ(trans_dp_ctl); + + /* disable DPLL_SEL */ + temp = I915_READ(PCH_DPLL_SEL); + if (trans_dpll_sel == 0) + temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); + else + temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + I915_READ(PCH_DPLL_SEL); + + } + /* disable PCH DPLL */ temp = I915_READ(pch_dpll_reg); - if ((temp & DPLL_VCO_ENABLE) != 0) { - I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - } + I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); if (HAS_eDP) { ironlake_disable_pll_edp(crtc); } + /* Switch from PCDclk to Rawclk */ temp = I915_READ(fdi_rx_reg); temp &= ~FDI_SEL_PCDCLK; I915_WRITE(fdi_rx_reg, temp); I915_READ(fdi_rx_reg); + /* Disable CPU FDI TX PLL */ + temp = I915_READ(fdi_tx_reg); + I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + temp = I915_READ(fdi_rx_reg); temp &= ~FDI_RX_PLL_ENABLE; I915_WRITE(fdi_rx_reg, temp); I915_READ(fdi_rx_reg); - /* Disable CPU FDI TX PLL */ - temp = I915_READ(fdi_tx_reg); - if ((temp & FDI_TX_PLL_ENABLE) != 0) { - I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); - } - /* Wait for the clocks to turn off. 
*/ udelay(100); break; @@ -2020,6 +2270,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) intel_wait_for_vblank(dev); } + /* Don't disable pipe A or pipe A PLLs if needed */ + if (pipeconf_reg == PIPEACONF && + (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + goto skip_pipe_off; + /* Next, disable display pipes */ temp = I915_READ(pipeconf_reg); if ((temp & PIPEACONF_ENABLE) != 0) { @@ -2035,7 +2290,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); I915_READ(dpll_reg); } - + skip_pipe_off: /* Wait for the clocks to turn off. */ udelay(150); break; @@ -2330,6 +2585,30 @@ static struct intel_watermark_params i830_wm_info = { I830_FIFO_LINE_SIZE }; +static struct intel_watermark_params ironlake_display_wm_info = { + ILK_DISPLAY_FIFO, + ILK_DISPLAY_MAXWM, + ILK_DISPLAY_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params ironlake_display_srwm_info = { + ILK_DISPLAY_SR_FIFO, + ILK_DISPLAY_MAX_SRWM, + ILK_DISPLAY_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params ironlake_cursor_srwm_info = { + ILK_CURSOR_SR_FIFO, + ILK_CURSOR_MAX_SRWM, + ILK_CURSOR_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; + /** * intel_calculate_wm - calculate watermark level * @clock_in_khz: pixel clock @@ -2381,6 +2660,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, struct cxsr_latency { int is_desktop; + int is_ddr3; unsigned long fsb_freq; unsigned long mem_freq; unsigned long display_sr; @@ -2390,33 +2670,45 @@ struct cxsr_latency { }; static struct cxsr_latency cxsr_latency_table[] = { - {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ - {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ - {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ - - {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ - {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ - {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ - - {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ - {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ - {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ - - {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ - {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ - {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ - - {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ - {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ - {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ - - {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ - {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ - {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ + + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ + + {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ + {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* 
DDR2-800 SC */ + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ + + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ + + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ + + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ + {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ }; -static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, - int mem) +static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, + int fsb, int mem) { int i; struct cxsr_latency *latency; @@ -2427,6 +2719,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { latency = &cxsr_latency_table[i]; if (is_desktop == latency->is_desktop && + is_ddr3 == latency->is_ddr3 && fsb == latency->fsb_freq && mem == latency->mem_freq) return latency; } @@ -2448,66 +2741,6 @@ static void pineview_disable_cxsr(struct drm_device *dev) DRM_INFO("Big FIFO is disabled\n"); } -static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, - int pixel_size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg; - unsigned long wm; - struct cxsr_latency *latency; - - latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, - dev_priv->mem_freq); - if (!latency) { - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - pineview_disable_cxsr(dev); - return; - } - - /* Display SR */ - wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size, - latency->display_sr); - reg = I915_READ(DSPFW1); - reg &= 0x7fffff; - reg |= wm << 23; - I915_WRITE(DSPFW1, reg); - DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); - - /* cursor SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size, - latency->cursor_sr); - reg = I915_READ(DSPFW3); - reg &= ~(0x3f << 24); - reg |= (wm & 0x3f) << 24; - I915_WRITE(DSPFW3, reg); - - /* Display HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, - latency->display_hpll_disable, I915_FIFO_LINE_SIZE); - reg = I915_READ(DSPFW3); - reg &= 0xfffffe00; - reg |= wm & 0x1ff; - I915_WRITE(DSPFW3, reg); - - /* cursor HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size, - latency->cursor_hpll_disable); - reg = I915_READ(DSPFW3); - reg &= ~(0x3f << 16); - reg |= (wm & 0x3f) << 16; - I915_WRITE(DSPFW3, reg); - DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); - - /* activate cxsr */ - reg = I915_READ(DSPFW3); - reg |= PINEVIEW_SELF_REFRESH_EN; - I915_WRITE(DSPFW3, reg); - - DRM_INFO("Big FIFO is enabled\n"); - - return; -} - /* * Latency for FIFO fetches is dependent on several factors: * - memory configuration (speed, channels) @@ -2592,6 +2825,71 @@ static int 
i830_get_fifo_size(struct drm_device *dev, int plane) return size; } +static void pineview_update_wm(struct drm_device *dev, int planea_clock, + int planeb_clock, int sr_hdisplay, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + unsigned long wm; + struct cxsr_latency *latency; + int sr_clock; + + latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, + dev_priv->fsb_freq, dev_priv->mem_freq); + if (!latency) { + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + pineview_disable_cxsr(dev); + return; + } + + if (!planea_clock || !planeb_clock) { + sr_clock = planea_clock ? planea_clock : planeb_clock; + + /* Display SR */ + wm = intel_calculate_wm(sr_clock, &pineview_display_wm, + pixel_size, latency->display_sr); + reg = I915_READ(DSPFW1); + reg &= ~DSPFW_SR_MASK; + reg |= wm << DSPFW_SR_SHIFT; + I915_WRITE(DSPFW1, reg); + DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); + + /* cursor SR */ + wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, + pixel_size, latency->cursor_sr); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_CURSOR_SR_MASK; + reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; + I915_WRITE(DSPFW3, reg); + + /* Display HPLL off SR */ + wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, + pixel_size, latency->display_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_SR_MASK; + reg |= wm & DSPFW_HPLL_SR_MASK; + I915_WRITE(DSPFW3, reg); + + /* cursor HPLL off SR */ + wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, + pixel_size, latency->cursor_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_CURSOR_MASK; + reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; + I915_WRITE(DSPFW3, reg); + DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); + + /* activate cxsr */ + reg = I915_READ(DSPFW3); + reg |= PINEVIEW_SELF_REFRESH_EN; + I915_WRITE(DSPFW3, reg); + DRM_DEBUG_KMS("Self-refresh is enabled\n"); + } else { + pineview_disable_cxsr(dev); + DRM_DEBUG_KMS("Self-refresh is disabled\n"); + } +} + static void g4x_update_wm(struct drm_device *dev, int planea_clock, int planeb_clock, int sr_hdisplay, int pixel_size) { @@ -2690,11 +2988,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, if (srwm < 0) srwm = 1; srwm &= 0x3f; - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + if (IS_I965GM(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); } else { /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); + if (IS_I965GM(dev)) + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", @@ -2812,6 +3112,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, I915_WRITE(FW_BLC, fwater_lo); } +#define ILK_LP0_PLANE_LATENCY 700 + +static void ironlake_update_wm(struct drm_device *dev, int planea_clock, + int planeb_clock, int sr_hdisplay, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int sr_wm, cursor_wm; + unsigned long line_time_us; + int sr_clock, entries_required; + u32 reg_value; + + /* Calculate and update the watermark for plane A */ + if (planea_clock) { + entries_required = ((planea_clock / 1000) * pixel_size * + ILK_LP0_PLANE_LATENCY) / 1000; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_display_wm_info.cacheline_size); + planea_wm = entries_required + + ironlake_display_wm_info.guard_size; + + if 
(planea_wm > (int)ironlake_display_wm_info.max_wm) + planea_wm = ironlake_display_wm_info.max_wm; + + cursora_wm = 16; + reg_value = I915_READ(WM0_PIPEA_ILK); + reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | + (cursora_wm & WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEA_ILK, reg_value); + DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, " + "cursor: %d\n", planea_wm, cursora_wm); + } + /* Calculate and update the watermark for plane B */ + if (planeb_clock) { + entries_required = ((planeb_clock / 1000) * pixel_size * + ILK_LP0_PLANE_LATENCY) / 1000; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_display_wm_info.cacheline_size); + planeb_wm = entries_required + + ironlake_display_wm_info.guard_size; + + if (planeb_wm > (int)ironlake_display_wm_info.max_wm) + planeb_wm = ironlake_display_wm_info.max_wm; + + cursorb_wm = 16; + reg_value = I915_READ(WM0_PIPEB_ILK); + reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | + (cursorb_wm & WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEB_ILK, reg_value); + DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, " + "cursor: %d\n", planeb_wm, cursorb_wm); + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + */ + if (!planea_clock || !planeb_clock) { + int line_count; + /* Read the self-refresh latency. The unit is 0.5us */ + int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; + + sr_clock = planea_clock ? planea_clock : planeb_clock; + line_time_us = ((sr_hdisplay * 1000) / sr_clock); + + /* Use ns/us then divide to preserve precision */ + line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) + / 1000; + + /* calculate the self-refresh watermark for display plane */ + entries_required = line_count * sr_hdisplay * pixel_size; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_display_srwm_info.cacheline_size); + sr_wm = entries_required + + ironlake_display_srwm_info.guard_size; + + /* calculate the self-refresh watermark for display cursor */ + entries_required = line_count * pixel_size * 64; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_cursor_srwm_info.cacheline_size); + cursor_wm = entries_required + + ironlake_cursor_srwm_info.guard_size; + + /* configure watermark and enable self-refresh */ + reg_value = I915_READ(WM1_LP_ILK); + reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | + WM1_LP_CURSOR_MASK); + reg_value |= WM1_LP_SR_EN | + (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | + (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; + + I915_WRITE(WM1_LP_ILK, reg_value); + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", sr_wm, cursor_wm); + + } else { + /* Turn off self refresh if both pipes are enabled */ + I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + } +} /** * intel_update_watermarks - update FIFO watermark values based on current modes * @@ -2881,12 +3283,6 @@ static void intel_update_watermarks(struct drm_device *dev) if (enabled <= 0) return; - /* Single plane configs can enable self refresh */ - if (enabled == 1 && IS_PINEVIEW(dev)) - pineview_enable_cxsr(dev, sr_clock, pixel_size); - else if (IS_PINEVIEW(dev)) - pineview_disable_cxsr(dev); - dev_priv->display.update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, pixel_size); } @@ -2916,14 +3312,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int dspsize_reg = (plane == 0) ? 
DSPASIZE : DSPBSIZE; int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; - int refclk, num_outputs = 0; + int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; bool is_edp = false; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *connector; + struct drm_encoder *encoder; + struct intel_encoder *intel_encoder = NULL; const intel_limit_t *limit; int ret; struct fdi_m_n m_n = {0}; @@ -2934,6 +3331,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int trans_dpll_sel = (pipe == 0) ? 0 : 1; int lvds_reg = LVDS; u32 temp; int sdvo_pixel_multiply; @@ -2941,20 +3340,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, drm_vblank_pre_modeset(dev, pipe); - list_for_each_entry(connector, &mode_config->connector_list, head) { - struct intel_output *intel_output = to_intel_output(connector); + list_for_each_entry(encoder, &mode_config->encoder_list, head) { - if (!connector->encoder || connector->encoder->crtc != crtc) + if (!encoder || encoder->crtc != crtc) continue; - switch (intel_output->type) { + intel_encoder = enc_to_intel_encoder(encoder); + + switch (intel_encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: case INTEL_OUTPUT_HDMI: is_sdvo = true; - if (intel_output->needs_tv_clock) + if (intel_encoder->needs_tv_clock) is_tv = true; break; case INTEL_OUTPUT_DVO: @@ -2974,10 +3374,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, break; } - num_outputs++; + num_connectors++; } - if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { + if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { refclk = dev_priv->lvds_ssc_freq * 1000; DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", refclk / 1000); @@ -3042,14 +3442,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* FDI link */ if (HAS_PCH_SPLIT(dev)) { - int lane, link_bw, bpp; + int lane = 0, link_bw, bpp; /* eDP doesn't require FDI link, so just set DP M/N according to current link config */ if (is_edp) { - struct drm_connector *edp; target_clock = mode->clock; - edp = intel_pipe_get_output(crtc); - intel_edp_link_config(to_intel_output(edp), + intel_edp_link_config(intel_encoder, &lane, &link_bw); } else { /* DP over FDI requires target mode clock @@ -3058,7 +3456,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, target_clock = mode->clock; else target_clock = adjusted_mode->clock; - lane = 4; link_bw = 270000; } @@ -3110,6 +3507,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, bpp = 24; } + if (!lane) { + /* + * Account for spread spectrum to avoid + * oversubscribing the link. Max center spread + * is 2.5%; use 5% for safety's sake. 
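+ * + * Worked example (a sketch, numbers purely illustrative): + * a 154000 kHz target clock at 24 bpp gives + * bps = 154000 * 24 * 21 / 20 = 3880800, so with + * link_bw = 270000 this picks + * lane = 3880800 / (270000 * 8) + 1 = 2 lanes.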
+ */ + u32 bps = target_clock * bpp * 21 / 20; + lane = bps / (link_bw * 8) + 1; + } + + intel_crtc->fdi_lanes = lane; + ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); } @@ -3230,7 +3639,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* XXX: just matching BIOS for now */ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ dpll |= 3; - else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) + else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -3291,6 +3700,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(150); } + /* enable transcoder DPLL */ + if (HAS_PCH_CPT(dev)) { + temp = I915_READ(PCH_DPLL_SEL); + if (trans_dpll_sel == 0) + temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + else + temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + I915_READ(PCH_DPLL_SEL); + udelay(150); + } + /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. @@ -3302,7 +3723,18 @@ lvds_reg = PCH_LVDS; lvds = I915_READ(lvds_reg); - lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; + lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; + if (pipe == 1) { + if (HAS_PCH_CPT(dev)) + lvds |= PORT_TRANS_B_SEL_CPT; + else + lvds |= LVDS_PIPEB_SELECT; + } else { + if (HAS_PCH_CPT(dev)) + lvds &= ~PORT_TRANS_SEL_MASK; + else + lvds &= ~LVDS_PIPEB_SELECT; + } /* set the corresponding LVDS_BORDER bit */ lvds |= dev_priv->lvds_border_bits; /* Set the B0-B3 data pairs corresponding to whether we're going to @@ -3320,14 +3752,17 @@ /* set the dithering flag */ if (IS_I965G(dev)) { if (dev_priv->lvds_dither) { - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev)) { pipeconf |= PIPE_ENABLE_DITHER; - else + pipeconf &= ~PIPE_DITHER_TYPE_MASK; + pipeconf |= PIPE_DITHER_TYPE_ST01; + } else lvds |= LVDS_ENABLE_DITHER; } else { - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev)) { pipeconf &= ~PIPE_ENABLE_DITHER; - else + pipeconf &= ~PIPE_DITHER_TYPE_MASK; + } else lvds &= ~LVDS_ENABLE_DITHER; } } @@ -3336,6 +3771,20 @@ } if (is_dp) intel_dp_set_m_n(crtc, mode, adjusted_mode); + else if (HAS_PCH_SPLIT(dev)) { + /* For non-DP output, clear any trans DP clock recovery setting. */ + if (pipe == 0) { + I915_WRITE(TRANSA_DATA_M1, 0); + I915_WRITE(TRANSA_DATA_N1, 0); + I915_WRITE(TRANSA_DP_LINK_M1, 0); + I915_WRITE(TRANSA_DP_LINK_N1, 0); + } else { + I915_WRITE(TRANSB_DATA_M1, 0); + I915_WRITE(TRANSB_DATA_N1, 0); + I915_WRITE(TRANSB_DP_LINK_M1, 0); + I915_WRITE(TRANSB_DP_LINK_N1, 0); + } + } if (!is_edp) { I915_WRITE(fp_reg, fp); @@ -3376,6 +3825,18 @@ } } + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; + /* the chip adds 2 halflines automatically */ + adjusted_mode->crtc_vdisplay -= 1; + adjusted_mode->crtc_vtotal -= 1; + adjusted_mode->crtc_vblank_start -= 1; + adjusted_mode->crtc_vblank_end -= 1; + adjusted_mode->crtc_vsync_end -= 1; + adjusted_mode->crtc_vsync_start -= 1; + } else + pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ + I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); I915_WRITE(hblank_reg, 
(adjusted_mode->crtc_hblank_start - 1) | @@ -3410,6 +3871,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* enable FDI RX PLL too */ temp = I915_READ(fdi_rx_reg); I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + I915_READ(fdi_rx_reg); + udelay(200); + + /* enable FDI TX PLL too */ + temp = I915_READ(fdi_tx_reg); + I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + + /* enable FDI RX PCDCLK */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); + I915_READ(fdi_rx_reg); udelay(200); } } @@ -3510,7 +3983,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (!bo) return -ENOENT; - obj_priv = bo->driver_private; + obj_priv = to_intel_bo(bo); if (bo->size < width * height * 4) { DRM_ERROR("buffer is too small\n"); @@ -3526,6 +3999,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; } + + ret = i915_gem_object_set_to_gtt_domain(bo, 0); + if (ret) { + DRM_ERROR("failed to move cursor bo into the GTT\n"); + goto fail_unpin; + } + addr = obj_priv->gtt_offset; } else { ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); @@ -3569,6 +4049,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_bo = bo; return 0; +fail_unpin: + i915_gem_object_unpin(bo); fail_locked: mutex_unlock(&dev->struct_mutex); fail: @@ -3654,9 +4136,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, * detection. * * It will be up to the load-detect code to adjust the pipe as appropriate for - * its requirements. The pipe will be connected to no other outputs. + * its requirements. The pipe will be connected to no other encoders. * - * Currently this code will only succeed if there is a pipe with no outputs + * Currently this code will only succeed if there is a pipe with no encoders * configured for it. In the future, it could choose to temporarily disable * some outputs to free up a pipe for its use. 
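* * A typical caller flow (sketch): grab a pipe with * intel_get_load_detect_pipe(), run the hardware load detection * against the now-driven encoder, then hand the pipe back via * intel_release_load_detect_pipe(), passing the dpms_mode the * first call reported so the old CRTC state can be restored.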
* @@ -3669,14 +4151,15 @@ static struct drm_display_mode load_detect_mode = { 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), }; -struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, +struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, struct drm_display_mode *mode, int *dpms_mode) { struct intel_crtc *intel_crtc; struct drm_crtc *possible_crtc; struct drm_crtc *supported_crtc =NULL; - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = &intel_encoder->enc; struct drm_crtc *crtc = NULL; struct drm_device *dev = encoder->dev; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -3728,8 +4211,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, } encoder->crtc = crtc; - intel_output->base.encoder = encoder; - intel_output->load_detect_temp = true; + connector->encoder = encoder; + intel_encoder->load_detect_temp = true; intel_crtc = to_intel_crtc(crtc); *dpms_mode = intel_crtc->dpms_mode; @@ -3754,23 +4237,24 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, return crtc; } -void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) +void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, int dpms_mode) { - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = &intel_encoder->enc; struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - if (intel_output->load_detect_temp) { + if (intel_encoder->load_detect_temp) { encoder->crtc = NULL; - intel_output->base.encoder = NULL; - intel_output->load_detect_temp = false; + connector->encoder = NULL; + intel_encoder->load_detect_temp = false; crtc->enabled = drm_helper_crtc_in_use(crtc); drm_helper_disable_unused_functions(dev); } - /* Switch crtc and output back off if necessary */ + /* Switch crtc and encoder back off if necessary */ if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { if (encoder->crtc == crtc) encoder_funcs->dpms(encoder, dpms_mode); @@ -3947,7 +4431,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) DRM_DEBUG_DRIVER("upclocking LVDS\n"); /* Unlock panel regs */ - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); + I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | + PANEL_UNLOCK_REGS); dpll &= ~DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); @@ -3990,7 +4475,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) DRM_DEBUG_DRIVER("downclocking LVDS\n"); /* Unlock panel regs */ - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); + I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | + PANEL_UNLOCK_REGS); dpll |= DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); @@ -4020,27 +4506,31 @@ static void intel_idle_update(struct work_struct *work) struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; + int enabled = 0; if (!i915_powersave) return; mutex_lock(&dev->struct_mutex); - if (IS_I945G(dev) || IS_I945GM(dev)) { - DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); - } + i915_update_gfx_val(dev_priv); list_for_each_entry(crtc, &dev->mode_config.crtc_list, 
head) { /* Skip inactive CRTCs */ if (!crtc->fb) continue; + enabled++; intel_crtc = to_intel_crtc(crtc); if (!intel_crtc->busy) intel_decrease_pllclock(crtc); } + if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { + DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); + } + mutex_unlock(&dev->struct_mutex); } @@ -4136,10 +4626,10 @@ static void intel_unpin_work_fn(struct work_struct *__work) kfree(work); } -void intel_finish_page_flip(struct drm_device *dev, int pipe) +static void do_intel_finish_page_flip(struct drm_device *dev, + struct drm_crtc *crtc) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; struct drm_i915_gem_object *obj_priv; @@ -4154,12 +4644,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { - if (work && !work->pending) { - obj_priv = work->pending_flip_obj->driver_private; - DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", - obj_priv, - atomic_read(&obj_priv->pending_flip)); - } spin_unlock_irqrestore(&dev->event_lock, flags); return; } @@ -4180,7 +4664,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = work->pending_flip_obj->driver_private; + obj_priv = to_intel_bo(work->pending_flip_obj); /* Initial scanout buffer will have a 0 pending flip count */ if ((atomic_read(&obj_priv->pending_flip) == 0) || @@ -4189,6 +4673,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) schedule_work(&work->work); } +void intel_finish_page_flip(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + + do_intel_finish_page_flip(dev, crtc); +} + +void intel_finish_page_flip_plane(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; + + do_intel_finish_page_flip(dev, crtc); +} + void intel_prepare_page_flip(struct drm_device *dev, int plane) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -4216,17 +4716,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_gem_object *obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; - unsigned long flags; + unsigned long flags, offset; int pipesrc_reg = (intel_crtc->pipe == 0) ? 
PIPEASRC : PIPEBSRC; int ret, pipesrc; - RING_LOCALS; + u32 flip_mask; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) return -ENOMEM; - mutex_lock(&dev->struct_mutex); - work->event = event; work->dev = crtc->dev; intel_fb = to_intel_framebuffer(crtc->fb); @@ -4236,10 +4734,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&dev->event_lock, flags); kfree(work); - mutex_unlock(&dev->struct_mutex); + + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); return -EBUSY; } intel_crtc->unpin_work = work; @@ -4248,13 +4746,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; + mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, obj); if (ret != 0) { - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", - obj->driver_private); - kfree(work); - intel_crtc->unpin_work = NULL; mutex_unlock(&dev->struct_mutex); + + spin_lock_irqsave(&dev->event_lock, flags); + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + kfree(work); + + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", + to_intel_bo(obj)); return ret; } @@ -4265,20 +4769,37 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, crtc->fb = fb; i915_gem_object_flush_write_domain(obj); drm_vblank_get(dev, intel_crtc->pipe); - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); atomic_inc(&obj_priv->pending_flip); work->pending_flip_obj = obj; + if (intel_crtc->plane) + flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + else + flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; + + /* Wait for any previous flip to finish */ + if (IS_GEN3(dev)) + while (I915_READ(ISR) & flip_mask) + ; + + /* Offset into the new buffer for cases of shared fbs between CRTCs */ + offset = obj_priv->gtt_offset; + offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); + BEGIN_LP_RING(4); - OUT_RING(MI_DISPLAY_FLIP | - MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitch); if (IS_I965G(dev)) { - OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); + OUT_RING(MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitch); + OUT_RING(offset | obj_priv->tiling_mode); pipesrc = I915_READ(pipesrc_reg); OUT_RING(pipesrc & 0x0fff0fff); } else { - OUT_RING(obj_priv->gtt_offset); + OUT_RING(MI_DISPLAY_FLIP_I915 | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitch); + OUT_RING(offset); OUT_RING(MI_NOOP); } ADVANCE_LP_RING(); @@ -4391,15 +4912,15 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) return crtc; } -static int intel_connector_clones(struct drm_device *dev, int type_mask) +static int intel_encoder_clones(struct drm_device *dev, int type_mask) { int index_mask = 0; - struct drm_connector *connector; + struct drm_encoder *encoder; int entry = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct intel_output *intel_output = to_intel_output(connector); - if (type_mask & intel_output->clone_mask) + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + if (type_mask & intel_encoder->clone_mask) index_mask |= (1 << entry); entry++; } @@ -4410,7 +4931,7 @@ static int 
intel_connector_clones(struct drm_device *dev, int type_mask) static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_connector *connector; + struct drm_encoder *encoder; intel_crt_init(dev); @@ -4425,9 +4946,8 @@ static void intel_setup_outputs(struct drm_device *dev) intel_dp_init(dev, DP_A); if (I915_READ(HDMIB) & PORT_DETECTED) { - /* check SDVOB */ - /* found = intel_sdvo_init(dev, HDMIB); */ - found = 0; + /* PCH SDVOB multiplex with HDMIB */ + found = intel_sdvo_init(dev, PCH_SDVOB); if (!found) intel_hdmi_init(dev, HDMIB); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) @@ -4493,23 +5013,18 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_TV(dev)) intel_tv_init(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct intel_output *intel_output = to_intel_output(connector); - struct drm_encoder *encoder = &intel_output->enc; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - encoder->possible_crtcs = intel_output->crtc_mask; - encoder->possible_clones = intel_connector_clones(dev, - intel_output->clone_mask); + encoder->possible_crtcs = intel_encoder->crtc_mask; + encoder->possible_clones = intel_encoder_clones(dev, + intel_encoder->clone_mask); } } static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_device *dev = fb->dev; - - if (fb->fbdev) - intelfb_remove(dev, fb); drm_framebuffer_cleanup(fb); drm_gem_object_unreference_unlocked(intel_fb->obj); @@ -4532,18 +5047,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { .create_handle = intel_user_framebuffer_create_handle, }; -int intel_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd *mode_cmd, - struct drm_framebuffer **fb, - struct drm_gem_object *obj) +int intel_framebuffer_init(struct drm_device *dev, + struct intel_framebuffer *intel_fb, + struct drm_mode_fb_cmd *mode_cmd, + struct drm_gem_object *obj) { - struct intel_framebuffer *intel_fb; int ret; - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) - return -ENOMEM; - ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); @@ -4551,40 +5061,41 @@ int intel_framebuffer_create(struct drm_device *dev, } drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); - intel_fb->obj = obj; - - *fb = &intel_fb->base; - return 0; } - static struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, struct drm_mode_fb_cmd *mode_cmd) { struct drm_gem_object *obj; - struct drm_framebuffer *fb; + struct intel_framebuffer *intel_fb; int ret; obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); if (!obj) return NULL; - ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + if (!intel_fb) + return NULL; + + ret = intel_framebuffer_init(dev, intel_fb, + mode_cmd, obj); if (ret) { drm_gem_object_unreference_unlocked(obj); + kfree(intel_fb); return NULL; } - return fb; + return &intel_fb->base; } static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, - .fb_changed = intelfb_probe, + .output_poll_changed = intel_fb_output_poll_changed, }; static struct drm_gem_object * @@ -4593,7 +5104,7 @@ 
intel_alloc_power_context(struct drm_device *dev) struct drm_gem_object *pwrctx; int ret; - pwrctx = drm_gem_object_alloc(dev, 4096); + pwrctx = i915_gem_alloc_object(dev, 4096); if (!pwrctx) { DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); return NULL; @@ -4623,10 +5134,32 @@ err_unref: return NULL; } +bool ironlake_set_drps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 rgvswctl; + + rgvswctl = I915_READ16(MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_DEBUG("gpu busy, RCS change rejected\n"); + return false; /* still busy with another command */ + } + + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; + I915_WRITE16(MEMSWCTL, rgvswctl); + POSTING_READ16(MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE16(MEMSWCTL, rgvswctl); + + return true; +} + void ironlake_enable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; + u32 rgvmodectl = I915_READ(MEMMODECTL); u8 fmax, fmin, fstart, vstart; int i = 0; @@ -4645,13 +5178,21 @@ void ironlake_enable_drps(struct drm_device *dev) fmin = (rgvmodectl & MEMMODE_FMIN_MASK); fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT; + fstart = fmax; + vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; - dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ + dev_priv->fmax = fstart; /* IPS callback will increase this */ + dev_priv->fstart = fstart; + + dev_priv->max_delay = fmax; dev_priv->min_delay = fmin; dev_priv->cur_delay = fstart; + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, + fstart); + I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); /* @@ -4673,20 +5214,19 @@ void ironlake_enable_drps(struct drm_device *dev) } msleep(1); - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - I915_WRITE(MEMSWCTL, rgvswctl); - POSTING_READ(MEMSWCTL); + ironlake_set_drps(dev, fstart); - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE(MEMSWCTL, rgvswctl); + dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + + I915_READ(0x112e0); + dev_priv->last_time1 = jiffies_to_msecs(jiffies); + dev_priv->last_count2 = I915_READ(0x112f4); + getrawmonotonic(&dev_priv->last_time2); } void ironlake_disable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 rgvswctl; - u8 fstart; + u16 rgvswctl = I915_READ16(MEMSWCTL); /* Ack interrupts, disable EFC interrupt */ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); @@ -4696,11 +5236,7 @@ void ironlake_disable_drps(struct drm_device *dev) I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ - fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> - MEMMODE_FSTART_SHIFT; - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - I915_WRITE(MEMSWCTL, rgvswctl); + ironlake_set_drps(dev, dev_priv->fstart); msleep(1); rgvswctl |= MEMCTL_CMD_STS; I915_WRITE(MEMSWCTL, rgvswctl); @@ -4708,6 +5244,92 @@ void ironlake_disable_drps(struct drm_device *dev) } +static unsigned long intel_pxfreq(u32 vidfreq) +{ + unsigned long freq; + int div = (vidfreq & 0x3f0000) >> 16; + int post = (vidfreq & 0x3000) >> 12; + int pre = (vidfreq & 0x7); + + if (!pre) + return 0; + + freq = ((div * 133333) / ((1<<post) * pre)); + + return freq; +} + +void 
intel_init_emon(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 lcfuse; + u8 pxw[16]; + int i; + + /* Disable to program */ + I915_WRITE(ECR, 0); + POSTING_READ(ECR); + + /* Program energy weights for various events */ + I915_WRITE(SDEW, 0x15040d00); + I915_WRITE(CSIEW0, 0x007f0000); + I915_WRITE(CSIEW1, 0x1e220004); + I915_WRITE(CSIEW2, 0x04000004); + + for (i = 0; i < 5; i++) + I915_WRITE(PEW + (i * 4), 0); + for (i = 0; i < 3; i++) + I915_WRITE(DEW + (i * 4), 0); + + /* Program P-state weights to account for frequency power adjustment */ + for (i = 0; i < 16; i++) { + u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); + unsigned long freq = intel_pxfreq(pxvidfreq); + unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + unsigned long val; + + val = vid * vid; + val *= (freq / 1000); + val *= 255; + val /= (127*127*900); + if (val > 0xff) + DRM_ERROR("bad pxval: %ld\n", val); + pxw[i] = val; + } + /* Render standby states get 0 weight */ + pxw[14] = 0; + pxw[15] = 0; + + for (i = 0; i < 4; i++) { + u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | + (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); + I915_WRITE(PXW + (i * 4), val); + } + + /* Adjust magic regs to magic values (more experimental results) */ + I915_WRITE(OGW0, 0); + I915_WRITE(OGW1, 0); + I915_WRITE(EG0, 0x00007f00); + I915_WRITE(EG1, 0x0000000e); + I915_WRITE(EG2, 0x000e0000); + I915_WRITE(EG3, 0x68000300); + I915_WRITE(EG4, 0x42000000); + I915_WRITE(EG5, 0x00140031); + I915_WRITE(EG6, 0); + I915_WRITE(EG7, 0); + + for (i = 0; i < 8; i++) + I915_WRITE(PXWL + (i * 4), 0); + + /* Enable PMON + select events */ + I915_WRITE(ECR, 0x80000019); + + lcfuse = I915_READ(LCFUSE02); + + dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); +} + void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4717,6 +5339,39 @@ void intel_init_clock_gating(struct drm_device *dev) * specs, but enable as much else as we can. 
*/ if (HAS_PCH_SPLIT(dev)) { + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + if (IS_IRONLAKE(dev)) { + /* Required for FBC */ + dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; + /* Required for CxSR */ + dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_3DCGDIS0, + MARIUNIT_CLOCK_GATE_DISABLE | + SVSMUNIT_CLOCK_GATE_DISABLE); + } + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + /* + * According to the spec the following bits should be set in + * order to enable memory self-refresh + * The bit 22/21 of 0x42004 + * The bit 5 of 0x42020 + * The bit 15 of 0x45000 + */ + if (IS_IRONLAKE(dev)) { + I915_WRITE(ILK_DISPLAY_CHICKEN2, + (I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL)); + I915_WRITE(ILK_DSPCLK_GATE, + (I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE)); + I915_WRITE(DISP_ARB_CTL, + (I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS)); + } return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; @@ -4764,14 +5419,14 @@ void intel_init_clock_gating(struct drm_device *dev) struct drm_i915_gem_object *obj_priv = NULL; if (dev_priv->pwrctx) { - obj_priv = dev_priv->pwrctx->driver_private; + obj_priv = to_intel_bo(dev_priv->pwrctx); } else { struct drm_gem_object *pwrctx; pwrctx = intel_alloc_power_context(dev); if (pwrctx) { dev_priv->pwrctx = pwrctx; - obj_priv = pwrctx->driver_private; + obj_priv = to_intel_bo(pwrctx); } } @@ -4794,13 +5449,12 @@ static void intel_init_display(struct drm_device *dev) else dev_priv->display.dpms = i9xx_crtc_dpms; - /* Only mobile has FBC, leave pointers NULL for other chips */ - if (IS_MOBILE(dev)) { + if (I915_HAS_FBC(dev)) { if (IS_GM45(dev)) { dev_priv->display.fbc_enabled = g4x_fbc_enabled; dev_priv->display.enable_fbc = g4x_enable_fbc; dev_priv->display.disable_fbc = g4x_disable_fbc; - } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { + } else if (IS_I965GM(dev)) { dev_priv->display.fbc_enabled = i8xx_fbc_enabled; dev_priv->display.enable_fbc = i8xx_enable_fbc; dev_priv->display.disable_fbc = i8xx_disable_fbc; @@ -4832,30 +5486,114 @@ static void intel_init_display(struct drm_device *dev) i830_get_display_clock_speed; /* For FIFO watermark updates */ - if (HAS_PCH_SPLIT(dev)) - dev_priv->display.update_wm = NULL; - else if (IS_G4X(dev)) + if (HAS_PCH_SPLIT(dev)) { + if (IS_IRONLAKE(dev)) { + if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) + dev_priv->display.update_wm = ironlake_update_wm; + else { + DRM_DEBUG_KMS("Failed to get proper latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + } else + dev_priv->display.update_wm = NULL; + } else if (IS_PINEVIEW(dev)) { + if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), + dev_priv->is_ddr3, + dev_priv->fsb_freq, + dev_priv->mem_freq)) { + DRM_INFO("failed to find known CxSR latency " + "(found ddr%s fsb freq %d, mem freq %d), " + "disabling CxSR\n", + (dev_priv->is_ddr3 == 1) ? 
"3": "2", + dev_priv->fsb_freq, dev_priv->mem_freq); + /* Disable CxSR and never update its watermark again */ + pineview_disable_cxsr(dev); + dev_priv->display.update_wm = NULL; + } else + dev_priv->display.update_wm = pineview_update_wm; + } else if (IS_G4X(dev)) dev_priv->display.update_wm = g4x_update_wm; else if (IS_I965G(dev)) dev_priv->display.update_wm = i965_update_wm; - else if (IS_I9XX(dev) || IS_MOBILE(dev)) { + else if (IS_I9XX(dev)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; + } else if (IS_I85X(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i85x_get_fifo_size; } else { - if (IS_I85X(dev)) - dev_priv->display.get_fifo_size = i85x_get_fifo_size; - else if (IS_845G(dev)) + dev_priv->display.update_wm = i830_update_wm; + if (IS_845G(dev)) dev_priv->display.get_fifo_size = i845_get_fifo_size; else dev_priv->display.get_fifo_size = i830_get_fifo_size; - dev_priv->display.update_wm = i830_update_wm; + } +} + +/* + * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, + * resume, or other times. This quirk makes sure that's the case for + * affected systems. + */ +static void quirk_pipea_force (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->quirks |= QUIRK_PIPEA_FORCE; + DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); +} + +struct intel_quirk { + int device; + int subsystem_vendor; + int subsystem_device; + void (*hook)(struct drm_device *dev); +}; + +struct intel_quirk intel_quirks[] = { + /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ + { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, + /* HP Mini needs pipe A force quirk (LP: #322104) */ + { 0x27ae,0x103c, 0x361a, quirk_pipea_force }, + + /* Thinkpad R31 needs pipe A force quirk */ + { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, + /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ + { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, + + /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ + { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, + /* ThinkPad X40 needs pipe A force quirk */ + + /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ + { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, + + /* 855 & before need to leave pipe A & dpll A up */ + { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, + { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, +}; + +static void intel_init_quirks(struct drm_device *dev) +{ + struct pci_dev *d = dev->pdev; + int i; + + for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { + struct intel_quirk *q = &intel_quirks[i]; + + if (d->device == q->device && + (d->subsystem_vendor == q->subsystem_vendor || + q->subsystem_vendor == PCI_ANY_ID) && + (d->subsystem_device == q->subsystem_device || + q->subsystem_device == PCI_ANY_ID)) + q->hook(dev); } } void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int num_pipe; int i; drm_mode_config_init(dev); @@ -4865,6 +5603,8 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.funcs = (void *)&intel_mode_funcs; + intel_init_quirks(dev); + intel_init_display(dev); if (IS_I965G(dev)) { @@ -4885,13 +5625,13 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); if (IS_MOBILE(dev) || IS_I9XX(dev)) - num_pipe = 2; + dev_priv->num_pipe = 2; else - num_pipe = 1; + dev_priv->num_pipe = 1; DRM_DEBUG_KMS("%d display pipe%s available.\n", - 
num_pipe, num_pipe > 1 ? "s" : ""); + dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); - for (i = 0; i < num_pipe; i++) { + for (i = 0; i < dev_priv->num_pipe; i++) { intel_crtc_init(dev, i); } @@ -4899,21 +5639,16 @@ void intel_modeset_init(struct drm_device *dev) intel_init_clock_gating(dev); - if (IS_IRONLAKE_M(dev)) + if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); + intel_init_emon(dev); + } INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); intel_setup_overlay(dev); - - if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev), - dev_priv->fsb_freq, - dev_priv->mem_freq)) - DRM_INFO("failed to find known CxSR latency " - "(found fsb freq %d, mem freq %d), disabling CxSR\n", - dev_priv->fsb_freq, dev_priv->mem_freq); } void intel_modeset_cleanup(struct drm_device *dev) @@ -4924,6 +5659,9 @@ void intel_modeset_cleanup(struct drm_device *dev) mutex_lock(&dev->struct_mutex); + drm_kms_helper_poll_fini(dev); + intel_fbdev_fini(dev); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) @@ -4942,7 +5680,7 @@ void intel_modeset_cleanup(struct drm_device *dev) if (dev_priv->pwrctx) { struct drm_i915_gem_object *obj_priv; - obj_priv = dev_priv->pwrctx->driver_private; + obj_priv = to_intel_bo(dev_priv->pwrctx); I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); I915_READ(PWRCTXA); i915_gem_object_unpin(dev_priv->pwrctx); @@ -4958,14 +5696,29 @@ void intel_modeset_cleanup(struct drm_device *dev) } -/* current intel driver doesn't take advantage of encoders - always give back the encoder for the connector -*/ -struct drm_encoder *intel_best_encoder(struct drm_connector *connector) +/* + * Return which encoder is currently attached for connector. 
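+ * + * The connector keeps a zero-terminated list of candidate encoder + * ids (encoder_ids[], at most DRM_CONNECTOR_MAX_ENCODER entries); + * each id is resolved through drm_mode_object_find() and the first + * encoder that looks up successfully is returned, NULL if none does.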
+ */ +struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct drm_mode_object *obj; + struct drm_encoder *encoder; + int i; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == 0) + break; - return &intel_output->enc; + obj = drm_mode_object_find(connector->dev, + connector->encoder_ids[i], + DRM_MODE_OBJECT_ENCODER); + if (!obj) + continue; + + encoder = obj_to_encoder(obj); + return encoder; + } + return NULL; } /* diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3ef3a0d0edd0..5dde80f9e652 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -26,6 +26,7 @@ */ #include <linux/i2c.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" @@ -47,30 +48,28 @@ struct intel_dp_priv { uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; - uint32_t save_DP; - uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; int dpms_mode; uint8_t link_bw; uint8_t lane_count; uint8_t dpcd[4]; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; }; static void -intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, +intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); static void -intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); +intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); void -intel_edp_link_config (struct intel_output *intel_output, +intel_edp_link_config (struct intel_encoder *intel_encoder, int *lane_num, int *link_bw) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; *lane_num = dp_priv->lane_count; if (dp_priv->link_bw == DP_LINK_BW_1_62) @@ -80,9 +79,9 @@ intel_edp_link_config (struct intel_output *intel_output, } static int -intel_dp_max_lane_count(struct intel_output *intel_output) +intel_dp_max_lane_count(struct intel_encoder *intel_encoder) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; int max_lane_count = 4; if (dp_priv->dpcd[0] >= 0x11) { @@ -98,9 +97,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output) } static int -intel_dp_max_link_bw(struct intel_output *intel_output) +intel_dp_max_link_bw(struct intel_encoder *intel_encoder) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; int max_link_bw = dp_priv->dpcd[1]; switch (max_link_bw) { @@ -126,26 +125,36 @@ intel_dp_link_clock(uint8_t link_bw) /* I think this is a fiction */ static int intel_dp_link_required(struct drm_device *dev, - struct intel_output *intel_output, int pixel_clock) + struct intel_encoder *intel_encoder, int pixel_clock) { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_eDP(intel_output)) + if (IS_eDP(intel_encoder)) return (pixel_clock * dev_priv->edp_bpp) / 8; else return pixel_clock * 3; } static int +intel_dp_max_data_rate(int max_link_clock, int max_lanes) +{ + return (max_link_clock * max_lanes * 8) / 10; +} + +static int intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_output *intel_output = to_intel_output(connector); - int 
max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); - int max_lanes = intel_dp_max_lane_count(intel_output); - - if (intel_dp_link_required(connector->dev, intel_output, mode->clock) - > max_link_clock * max_lanes) + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); + int max_lanes = intel_dp_max_lane_count(intel_encoder); + + /* only refuse the mode on non-eDP since we have seen some weird eDP panels + which are outside spec tolerances but somehow work by magic */ + if (!IS_eDP(intel_encoder) && + (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) + > intel_dp_max_data_rate(max_link_clock, max_lanes))) return MODE_CLOCK_HIGH; if (mode->clock < 10000) @@ -208,13 +217,13 @@ intel_hrawclk(struct drm_device *dev) } static int -intel_dp_aux_ch(struct intel_output *intel_output, +intel_dp_aux_ch(struct intel_encoder *intel_encoder, uint8_t *send, int send_bytes, uint8_t *recv, int recv_size) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint32_t output_reg = dp_priv->output_reg; - struct drm_device *dev = intel_output->base.dev; + struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data = ch_ctl + 4; @@ -223,19 +232,27 @@ intel_dp_aux_ch(struct intel_output *intel_output, uint32_t ctl; uint32_t status; uint32_t aux_clock_divider; - int try; + int try, precharge; /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that */ - if (IS_eDP(intel_output)) - aux_clock_divider = 225; /* eDP input clock at 450MHz */ - else if (HAS_PCH_SPLIT(dev)) + if (IS_eDP(intel_encoder)) { + if (IS_GEN6(dev)) + aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */ + else + aux_clock_divider = 225; /* eDP input clock at 450MHz */ + } else if (HAS_PCH_SPLIT(dev)) aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */ else aux_clock_divider = intel_hrawclk(dev) / 2; + if (IS_GEN6(dev)) + precharge = 3; + else + precharge = 5; + /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ @@ -248,7 +265,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, ctl = (DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_TIME_OUT_400us | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_TIME_OUT_ERROR | @@ -312,7 +329,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, /* Write data to the aux channel in native mode */ static int -intel_dp_aux_native_write(struct intel_output *intel_output, +intel_dp_aux_native_write(struct intel_encoder *intel_encoder, uint16_t address, uint8_t *send, int send_bytes) { int ret; @@ -329,7 +346,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output, memcpy(&msg[4], send, send_bytes); msg_bytes = send_bytes + 4; for (;;) { - ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); + ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) @@ -344,15 +361,15 @@ 
intel_dp_aux_native_write(struct intel_output *intel_output, /* Write a single byte to the aux channel in native mode */ static int -intel_dp_aux_native_write_1(struct intel_output *intel_output, +intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, uint16_t address, uint8_t byte) { - return intel_dp_aux_native_write(intel_output, address, &byte, 1); + return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); } /* read bytes from a native aux channel */ static int -intel_dp_aux_native_read(struct intel_output *intel_output, +intel_dp_aux_native_read(struct intel_encoder *intel_encoder, uint16_t address, uint8_t *recv, int recv_bytes) { uint8_t msg[4]; @@ -371,7 +388,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output, reply_bytes = recv_bytes + 1; for (;;) { - ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, + ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, reply, reply_bytes); if (ret == 0) return -EPROTO; @@ -397,7 +414,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, struct intel_dp_priv *dp_priv = container_of(adapter, struct intel_dp_priv, adapter); - struct intel_output *intel_output = dp_priv->intel_output; + struct intel_encoder *intel_encoder = dp_priv->intel_encoder; uint16_t address = algo_data->address; uint8_t msg[5]; uint8_t reply[2]; @@ -436,7 +453,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } for (;;) { - ret = intel_dp_aux_ch(intel_output, + ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, reply, reply_bytes); if (ret < 0) { @@ -464,9 +481,10 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } static int -intel_dp_i2c_init(struct intel_output *intel_output, const char *name) +intel_dp_i2c_init(struct intel_encoder *intel_encoder, + struct intel_connector *intel_connector, const char *name) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; DRM_DEBUG_KMS("i2c_init %s\n", name); dp_priv->algo.running = false; @@ -479,7 +497,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name) strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; dp_priv->adapter.algo_data = &dp_priv->algo; - dp_priv->adapter.dev.parent = &intel_output->base.kdev; + dp_priv->adapter.dev.parent = &intel_connector->base.kdev; return i2c_dp_aux_add_bus(&dp_priv->adapter); } @@ -488,18 +506,18 @@ static bool intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; int lane_count, clock; - int max_lane_count = intel_dp_max_lane_count(intel_output); - int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; + int max_lane_count = intel_dp_max_lane_count(intel_encoder); + int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 
1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { - int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; + int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); - if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) + if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) <= link_avail) { dp_priv->link_bw = bws[clock]; dp_priv->lane_count = lane_count; @@ -512,6 +530,18 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } } + + if (IS_eDP(intel_encoder)) { + /* okay we failed just pick the highest */ + dp_priv->lane_count = max_lane_count; + dp_priv->link_bw = bws[max_clock]; + adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); + DRM_DEBUG_KMS("Force picking display port link bw %02x lane " + "count %d clock %d\n", + dp_priv->link_bw, dp_priv->lane_count, + adjusted_mode->clock); + return true; + } return false; } @@ -554,23 +584,26 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *connector; + struct drm_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int lane_count = 4; struct intel_dp_m_n m_n; /* - * Find the lane count in the intel_output private + * Find the lane count in the intel_encoder private */ - list_for_each_entry(connector, &mode_config->connector_list, head) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + list_for_each_entry(encoder, &mode_config->encoder_list, head) { + struct intel_encoder *intel_encoder; + struct intel_dp_priv *dp_priv; - if (!connector->encoder || connector->encoder->crtc != crtc) + if (encoder->crtc != crtc) continue; - if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { + intel_encoder = enc_to_intel_encoder(encoder); + dp_priv = intel_encoder->dev_priv; + + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = dp_priv->lane_count; break; } @@ -625,16 +658,24 @@ static void intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; - struct drm_crtc *crtc = intel_output->enc.crtc; + struct drm_device *dev = encoder->dev; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct drm_crtc *crtc = intel_encoder->enc.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - dp_priv->DP = (DP_LINK_TRAIN_OFF | - DP_VOLTAGE_0_4 | - DP_PRE_EMPHASIS_0 | - DP_SYNC_VS_HIGH | - DP_SYNC_HS_HIGH); + dp_priv->DP = (DP_VOLTAGE_0_4 | + DP_PRE_EMPHASIS_0); + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + dp_priv->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + dp_priv->DP |= DP_SYNC_VS_HIGH; + + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; + else + dp_priv->DP |= DP_LINK_TRAIN_OFF; switch (dp_priv->lane_count) { case 1: @@ -655,18 +696,18 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, dp_priv->link_configuration[1] = dp_priv->lane_count; /* - 
* Check for DPCD version > 1.1, - * enable enahanced frame stuff in that case + * Check for DPCD version > 1.1 and enhanced framing support */ - if (dp_priv->dpcd[0] >= 0x11) { + if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; dp_priv->DP |= DP_ENHANCED_FRAMING; } - if (intel_crtc->pipe == 1) + /* CPT DP's pipe select is decided in TRANS_DP_CTL */ + if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) dp_priv->DP |= DP_PIPEB_SELECT; - if (IS_eDP(intel_output)) { + if (IS_eDP(intel_encoder)) { /* don't miss out required setting for eDP */ dp_priv->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) @@ -676,6 +717,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, } } +static void ironlake_edp_panel_on (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long timeout = jiffies + msecs_to_jiffies(5000); + u32 pp, pp_status; + + pp_status = I915_READ(PCH_PP_STATUS); + if (pp_status & PP_ON) + return; + + pp = I915_READ(PCH_PP_CONTROL); + pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; + I915_WRITE(PCH_PP_CONTROL, pp); + do { + pp_status = I915_READ(PCH_PP_STATUS); + } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout)); + + if (time_after(jiffies, timeout)) + DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); + + pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); + I915_WRITE(PCH_PP_CONTROL, pp); +} + +static void ironlake_edp_panel_off (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long timeout = jiffies + msecs_to_jiffies(5000); + u32 pp, pp_status; + + pp = I915_READ(PCH_PP_CONTROL); + pp &= ~POWER_TARGET_ON; + I915_WRITE(PCH_PP_CONTROL, pp); + do { + pp_status = I915_READ(PCH_PP_STATUS); + } while ((pp_status & PP_ON) && !time_after(jiffies, timeout)); + + if (time_after(jiffies, timeout)) + DRM_DEBUG_KMS("panel off wait timed out\n"); + + /* Make sure VDD is enabled so DP AUX will work */ + pp |= EDP_FORCE_VDD; + I915_WRITE(PCH_PP_CONTROL, pp); +} + static void ironlake_edp_backlight_on (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -701,23 +787,27 @@ static void ironlake_edp_backlight_off (struct drm_device *dev) static void intel_dp_dpms(struct drm_encoder *encoder, int mode) { - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; - struct drm_device *dev = intel_output->base.dev; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dp_reg = I915_READ(dp_priv->output_reg); if (mode != DRM_MODE_DPMS_ON) { if (dp_reg & DP_PORT_EN) { - intel_dp_link_down(intel_output, dp_priv->DP); - if (IS_eDP(intel_output)) + intel_dp_link_down(intel_encoder, dp_priv->DP); + if (IS_eDP(intel_encoder)) { ironlake_edp_backlight_off(dev); + ironlake_edp_panel_off(dev); + } } } else { if (!(dp_reg & DP_PORT_EN)) { - intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); - if (IS_eDP(intel_output)) + intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); + if (IS_eDP(intel_encoder)) { + ironlake_edp_panel_on(dev); ironlake_edp_backlight_on(dev); + } } } dp_priv->dpms_mode = mode; @@ -728,12 +818,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int 
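/*
 * [editor's note] ironlake_edp_panel_on()/ironlake_edp_panel_off() above
 * both use the same idiom: arm a 5 s jiffies deadline, then spin on the
 * panel power sequencer's status register until the PP_ON bit reaches
 * the wanted state or the deadline passes.  A minimal self-contained
 * sketch of that pattern; read_status() is a hypothetical stand-in for
 * I915_READ(PCH_PP_STATUS), everything else is stock <linux/jiffies.h>.
 */
static int wait_for_pp_bit(u32 (*read_status)(void), u32 mask, bool set,
			   unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		bool on = (read_status() & mask) != 0;

		if (on == set)
			return 0;		/* sequencer settled */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* never reached the state */
	}
}
/* panel-on would then be: wait_for_pp_bit(read_pp_status, PP_ON, true, 5000); */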
mode) * link status information */ static bool -intel_dp_get_link_status(struct intel_output *intel_output, +intel_dp_get_link_status(struct intel_encoder *intel_encoder, uint8_t link_status[DP_LINK_STATUS_SIZE]) { int ret; - ret = intel_dp_aux_native_read(intel_output, + ret = intel_dp_aux_native_read(intel_encoder, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE); if (ret != DP_LINK_STATUS_SIZE) @@ -748,20 +838,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], return link_status[r - DP_LANE0_1_STATUS]; } -static void -intel_dp_save(struct drm_connector *connector) -{ - struct intel_output *intel_output = to_intel_output(connector); - struct drm_device *dev = intel_output->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_output->dev_priv; - - dp_priv->save_DP = I915_READ(dp_priv->output_reg); - intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, - dp_priv->save_link_configuration, - sizeof (dp_priv->save_link_configuration)); -} - static uint8_t intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) @@ -824,7 +900,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) } static void -intel_get_adjust_train(struct intel_output *intel_output, +intel_get_adjust_train(struct intel_encoder *intel_encoder, uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count, uint8_t train_set[4]) @@ -891,6 +967,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) return signal_levels; } +/* Gen6's DP voltage swing and pre-emphasis control */ +static uint32_t +intel_gen6_edp_signal_levels(uint8_t train_set) +{ + switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: + return EDP_LINK_TRAIN_400MV_0DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: + return EDP_LINK_TRAIN_400MV_6DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: + return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: + return EDP_LINK_TRAIN_800MV_0DB_SNB_B; + default: + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); + return EDP_LINK_TRAIN_400MV_0DB_SNB_B; + } +} + static uint8_t intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) @@ -941,15 +1036,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) } static bool -intel_dp_set_link_train(struct intel_output *intel_output, +intel_dp_set_link_train(struct intel_encoder *intel_encoder, uint32_t dp_reg_value, uint8_t dp_train_pat, uint8_t train_set[4], bool first) { - struct drm_device *dev = intel_output->base.dev; + struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; int ret; I915_WRITE(dp_priv->output_reg, dp_reg_value); @@ -957,11 +1052,11 @@ intel_dp_set_link_train(struct intel_output *intel_output, if (first) intel_wait_for_vblank(dev); - intel_dp_aux_native_write_1(intel_output, + intel_dp_aux_native_write_1(intel_encoder, DP_TRAINING_PATTERN_SET, dp_train_pat); - ret = intel_dp_aux_native_write(intel_output, + ret = intel_dp_aux_native_write(intel_encoder, DP_TRAINING_LANE0_SET, train_set, 4); if (ret != 4) return false; @@ -970,12 +1065,12 @@ intel_dp_set_link_train(struct intel_output *intel_output, } static void -intel_dp_link_train(struct intel_output 
*intel_output, uint32_t DP, +intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) { - struct drm_device *dev = intel_output->base.dev; + struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint8_t train_set[4]; uint8_t link_status[DP_LINK_STATUS_SIZE]; int i; @@ -984,30 +1079,45 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, bool channel_eq = false; bool first = true; int tries; + u32 reg; /* Write the link configuration data */ - intel_dp_aux_native_write(intel_output, 0x100, + intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, link_configuration, DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; - DP &= ~DP_LINK_TRAIN_MASK; + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + DP &= ~DP_LINK_TRAIN_MASK_CPT; + else + DP &= ~DP_LINK_TRAIN_MASK; memset(train_set, 0, 4); voltage = 0xff; tries = 0; clock_recovery = false; for (;;) { /* Use train_set[0] to set the voltage and pre emphasis values */ - uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); - DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + uint32_t signal_levels; + if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { + signal_levels = intel_gen6_edp_signal_levels(train_set[0]); + DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; + } else { + signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + } + + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + reg = DP | DP_LINK_TRAIN_PAT_1_CPT; + else + reg = DP | DP_LINK_TRAIN_PAT_1; - if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, + if (!intel_dp_set_link_train(intel_encoder, reg, DP_TRAINING_PATTERN_1, train_set, first)) break; first = false; /* Set training pattern 1 */ udelay(100); - if (!intel_dp_get_link_status(intel_output, link_status)) + if (!intel_dp_get_link_status(intel_encoder, link_status)) break; if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { @@ -1032,7 +1142,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); + intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); } /* channel equalization */ @@ -1040,17 +1150,29 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, channel_eq = false; for (;;) { /* Use train_set[0] to set the voltage and pre emphasis values */ - uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); - DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + uint32_t signal_levels; + + if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { + signal_levels = intel_gen6_edp_signal_levels(train_set[0]); + DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; + } else { + signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + } + + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + reg = DP | DP_LINK_TRAIN_PAT_2_CPT; + else + reg = DP | DP_LINK_TRAIN_PAT_2; /* channel eq pattern */ - if (!intel_dp_set_link_train(intel_output, DP 
| DP_LINK_TRAIN_PAT_2, + if (!intel_dp_set_link_train(intel_encoder, reg, DP_TRAINING_PATTERN_2, train_set, false)) break; udelay(400); - if (!intel_dp_get_link_status(intel_output, link_status)) + if (!intel_dp_get_link_status(intel_encoder, link_status)) break; if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { @@ -1063,56 +1185,55 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, break; /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); + intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); ++tries; } - I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + reg = DP | DP_LINK_TRAIN_OFF_CPT; + else + reg = DP | DP_LINK_TRAIN_OFF; + + I915_WRITE(dp_priv->output_reg, reg); POSTING_READ(dp_priv->output_reg); - intel_dp_aux_native_write_1(intel_output, + intel_dp_aux_native_write_1(intel_encoder, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); } static void -intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) +intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) { - struct drm_device *dev = intel_output->base.dev; + struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; DRM_DEBUG_KMS("\n"); - if (IS_eDP(intel_output)) { + if (IS_eDP(intel_encoder)) { DP &= ~DP_PLL_ENABLE; I915_WRITE(dp_priv->output_reg, DP); POSTING_READ(dp_priv->output_reg); udelay(100); } - DP &= ~DP_LINK_TRAIN_MASK; - I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); - POSTING_READ(dp_priv->output_reg); + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { + DP &= ~DP_LINK_TRAIN_MASK_CPT; + I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); + POSTING_READ(dp_priv->output_reg); + } else { + DP &= ~DP_LINK_TRAIN_MASK; + I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); + POSTING_READ(dp_priv->output_reg); + } udelay(17000); - if (IS_eDP(intel_output)) + if (IS_eDP(intel_encoder)) DP |= DP_LINK_TRAIN_OFF; I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); POSTING_READ(dp_priv->output_reg); } -static void -intel_dp_restore(struct drm_connector *connector) -{ - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; - - if (dp_priv->save_DP & DP_PORT_EN) - intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); - else - intel_dp_link_down(intel_output, dp_priv->save_DP); -} - /* * According to DP spec * 5.1.2: @@ -1123,38 +1244,41 @@ intel_dp_restore(struct drm_connector *connector) */ static void -intel_dp_check_link_status(struct intel_output *intel_output) +intel_dp_check_link_status(struct intel_encoder *intel_encoder) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint8_t link_status[DP_LINK_STATUS_SIZE]; - if (!intel_output->enc.crtc) + if (!intel_encoder->enc.crtc) return; - if (!intel_dp_get_link_status(intel_output, link_status)) { - intel_dp_link_down(intel_output, dp_priv->DP); + if (!intel_dp_get_link_status(intel_encoder, link_status)) { + intel_dp_link_down(intel_encoder, dp_priv->DP); return; } if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) - intel_dp_link_train(intel_output, dp_priv->DP, 
dp_priv->link_configuration); + intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); } static enum drm_connector_status ironlake_dp_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; enum drm_connector_status status; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_output, + if (intel_dp_aux_native_read(intel_encoder, 0x000, dp_priv->dpcd, sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) { if (dp_priv->dpcd[0] != 0) status = connector_status_connected; } + DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], + dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); return status; } @@ -1167,10 +1291,11 @@ ironlake_dp_detect(struct drm_connector *connector) static enum drm_connector_status intel_dp_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct drm_device *dev = intel_output->base.dev; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint32_t temp, bit; enum drm_connector_status status; @@ -1179,16 +1304,6 @@ intel_dp_detect(struct drm_connector *connector) if (HAS_PCH_SPLIT(dev)) return ironlake_dp_detect(connector); - temp = I915_READ(PORT_HOTPLUG_EN); - - I915_WRITE(PORT_HOTPLUG_EN, - temp | - DPB_HOTPLUG_INT_EN | - DPC_HOTPLUG_INT_EN | - DPD_HOTPLUG_INT_EN); - - POSTING_READ(PORT_HOTPLUG_EN); - switch (dp_priv->output_reg) { case DP_B: bit = DPB_HOTPLUG_INT_STATUS; @@ -1209,7 +1324,7 @@ intel_dp_detect(struct drm_connector *connector) return connector_status_disconnected; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_output, + if (intel_dp_aux_native_read(intel_encoder, 0x000, dp_priv->dpcd, sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) { @@ -1221,20 +1336,21 @@ intel_dp_detect(struct drm_connector *connector) static int intel_dp_get_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct drm_device *dev = intel_output->base.dev; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* We should parse the EDID data and find out if it has an audio sink */ - ret = intel_ddc_get_modes(intel_output); + ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); if (ret) return ret; /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_output)) { + if (IS_eDP(intel_encoder)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1248,13 +1364,9 @@ static int intel_dp_get_modes(struct drm_connector *connector) static void intel_dp_destroy (struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - - if 
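/*
 * [editor's note] ironlake_dp_detect() above decides connector presence
 * purely from an AUX-channel read of the DPCD receiver-capability block
 * at address 0x000: a full-length read with a non-zero DPCD revision
 * byte means a sink is attached.  Hedged sketch of just that check,
 * using the intel_dp_aux_native_read() helper the patch already calls:
 */
static enum drm_connector_status
dp_detect_sketch(struct intel_encoder *intel_encoder, uint8_t dpcd[4])
{
	if (intel_dp_aux_native_read(intel_encoder, 0x000 /* DPCD rev */,
				     dpcd, 4) == 4 &&
	    dpcd[0] != 0)
		return connector_status_connected;
	return connector_status_disconnected;
}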
(intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(connector); } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { @@ -1267,8 +1379,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = intel_dp_save, - .restore = intel_dp_restore, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = intel_dp_destroy, @@ -1277,12 +1387,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, - .best_encoder = intel_best_encoder, + .best_encoder = intel_attached_encoder, }; static void intel_dp_enc_destroy(struct drm_encoder *encoder) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static const struct drm_encoder_funcs intel_dp_enc_funcs = { @@ -1290,12 +1405,34 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { }; void -intel_dp_hot_plug(struct intel_output *intel_output) +intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) - intel_dp_check_link_status(intel_output); + intel_dp_check_link_status(intel_encoder); +} + +/* Return which DP Port should be selected for Transcoder DP control */ +int +intel_trans_dp_port_sel (struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_encoder *encoder; + struct intel_encoder *intel_encoder = NULL; + + list_for_each_entry(encoder, &mode_config->encoder_list, head) { + if (encoder->crtc != crtc) + continue; + + intel_encoder = enc_to_intel_encoder(encoder); + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + return dp_priv->output_reg; + } + } + return -1; } void @@ -1303,53 +1440,62 @@ intel_dp_init(struct drm_device *dev, int output_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; struct intel_dp_priv *dp_priv; const char *name = NULL; - intel_output = kcalloc(sizeof(struct intel_output) + + intel_encoder = kcalloc(sizeof(struct intel_encoder) + sizeof(struct intel_dp_priv), 1, GFP_KERNEL); - if (!intel_output) + if (!intel_encoder) return; - dp_priv = (struct intel_dp_priv *)(intel_output + 1); + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_encoder); + return; + } - connector = &intel_output->base; + dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); + + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_dp_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + connector->polled = DRM_CONNECTOR_POLL_HPD; + if (output_reg == DP_A) - intel_output->type = INTEL_OUTPUT_EDP; + intel_encoder->type = 
INTEL_OUTPUT_EDP; else - intel_output->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; if (output_reg == DP_B || output_reg == PCH_DP_B) - intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); + intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); else if (output_reg == DP_C || output_reg == PCH_DP_C) - intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); + intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); else if (output_reg == DP_D || output_reg == PCH_DP_D) - intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); + intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - if (IS_eDP(intel_output)) - intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); + if (IS_eDP(intel_encoder)) + intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); - intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = true; connector->doublescan_allowed = 0; - dp_priv->intel_output = intel_output; + dp_priv->intel_encoder = intel_encoder; dp_priv->output_reg = output_reg; dp_priv->has_audio = false; dp_priv->dpms_mode = DRM_MODE_DPMS_ON; - intel_output->dev_priv = dp_priv; + intel_encoder->dev_priv = dp_priv; - drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); - drm_mode_connector_attach_encoder(&intel_output->base, - &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); drm_sysfs_connector_add(connector); /* Set up the DDC bus. */ @@ -1377,10 +1523,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) break; } - intel_dp_i2c_init(intel_output, name); + intel_dp_i2c_init(intel_encoder, intel_connector, name); - intel_output->ddc_bus = &dp_priv->adapter; - intel_output->hot_plug = intel_dp_hot_plug; + intel_encoder->ddc_bus = &dp_priv->adapter; + intel_encoder->hot_plug = intel_dp_hot_plug; if (output_reg == DP_A) { /* initialize panel mode from VBT if available for eDP */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3a467ca57857..2f7970be9051 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -95,9 +95,7 @@ struct intel_framebuffer { }; -struct intel_output { - struct drm_connector base; - +struct intel_encoder { struct drm_encoder enc; int type; struct i2c_adapter *i2c_bus; @@ -105,11 +103,16 @@ struct intel_output { bool load_detect_temp; bool needs_tv_clock; void *dev_priv; - void (*hot_plug)(struct intel_output *); + void (*hot_plug)(struct intel_encoder *); int crtc_mask; int clone_mask; }; +struct intel_connector { + struct drm_connector base; + void *dev_priv; +}; + struct intel_crtc; struct intel_overlay { struct drm_device *dev; @@ -149,18 +152,19 @@ struct intel_crtc { bool lowfreq_avail; struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; + int fdi_lanes; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) -#define to_intel_output(x) container_of(x, struct intel_output, base) -#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) +#define to_intel_connector(x) container_of(x, struct intel_connector, base) +#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, 
base) struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, const char *name); void intel_i2c_destroy(struct i2c_adapter *adapter); -int intel_ddc_get_modes(struct intel_output *intel_output); -extern bool intel_ddc_probe(struct intel_output *intel_output); +int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); +extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); void intel_i2c_quirk_set(struct drm_device *dev, bool enable); void intel_i2c_reset_gmbus(struct drm_device *dev); @@ -175,7 +179,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern void intel_edp_link_config (struct intel_output *, int *, int *); +extern void intel_edp_link_config (struct intel_encoder *, int *, int *); extern int intel_panel_fitter_pipe (struct drm_device *dev); @@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); extern void intel_encoder_commit (struct drm_encoder *encoder); -extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); +extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); @@ -191,18 +195,17 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_wait_for_vblank(struct drm_device *dev); extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); -extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, +extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, struct drm_display_mode *mode, int *dpms_mode); -extern void intel_release_load_detect_pipe(struct intel_output *intel_output, +extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, int dpms_mode); extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); -extern int intelfb_probe(struct drm_device *dev); -extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); -extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); extern void intelfb_restore(void); extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); @@ -212,13 +215,19 @@ extern void intel_init_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); -extern int intel_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd *mode_cmd, - struct drm_framebuffer **fb, - struct drm_gem_object *obj); +extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, + struct drm_gem_object *obj); + +extern int intel_framebuffer_init(struct drm_device *dev, + struct intel_framebuffer *ifb, + struct drm_mode_fb_cmd *mode_cmd, + struct drm_gem_object *obj); +extern int intel_fbdev_init(struct drm_device *dev); +extern void intel_fbdev_fini(struct drm_device *dev); extern void intel_prepare_page_flip(struct drm_device *dev, int plane); extern void 
intel_finish_page_flip(struct drm_device *dev, int pipe); +extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); extern void intel_setup_overlay(struct drm_device *dev); extern void intel_cleanup_overlay(struct drm_device *dev); @@ -229,4 +238,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_overlay_attrs(struct drm_device *dev, void *data, struct drm_file *file_priv); + +extern void intel_fb_output_poll_changed(struct drm_device *dev); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a4d2606de778..227feca7cf8d 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -25,6 +25,7 @@ * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" @@ -79,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = { static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_i915_private *dev_priv = encoder->dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; u32 dvo_reg = dvo->dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -95,40 +96,12 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) } } -static void intel_dvo_save(struct drm_connector *connector) -{ - struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; - - /* Each output should probably just save the registers it touches, - * but for now, use more overkill. - */ - dev_priv->saveDVOA = I915_READ(DVOA); - dev_priv->saveDVOB = I915_READ(DVOB); - dev_priv->saveDVOC = I915_READ(DVOC); - - dvo->dev_ops->save(dvo); -} - -static void intel_dvo_restore(struct drm_connector *connector) -{ - struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; - - dvo->dev_ops->restore(dvo); - - I915_WRITE(DVOA, dev_priv->saveDVOA); - I915_WRITE(DVOB, dev_priv->saveDVOB); - I915_WRITE(DVOC, dev_priv->saveDVOC); -} - static int intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -149,8 +122,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; /* If we have timings from the BIOS for the panel, put them in * to the adjusted mode. 
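/*
 * [editor's note] The intel_output split in intel_drv.h above replaces
 * one object embedding both drm_connector and drm_encoder with two
 * independent wrappers.  Connector callbacks therefore stop doing
 * container_of on the connector and instead walk connector -> attached
 * encoder -> wrapper, as every converted hunk in this file does.  A
 * hedged sketch of the recurring access pattern, with the DVO private
 * data as the example payload:
 */
static struct intel_dvo_device *
dvo_from_connector(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);
	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

	return intel_encoder->dev_priv;	/* per-output private data */
}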
The CRTC will be set up for this mode, @@ -185,8 +158,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; int pipe = intel_crtc->pipe; u32 dvo_val; u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; @@ -240,23 +213,25 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, */ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; return dvo->dev_ops->detect(dvo); } static int intel_dvo_get_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; /* We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. */ - intel_ddc_get_modes(intel_output); + intel_ddc_get_modes(connector, intel_encoder->ddc_bus); if (!list_empty(&connector->probed_modes)) return 1; @@ -274,39 +249,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector) static void intel_dvo_destroy (struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; - - if (dvo) { - if (dvo->dev_ops->destroy) - dvo->dev_ops->destroy(dvo); - if (dvo->panel_fixed_mode) - kfree(dvo->panel_fixed_mode); - /* no need, in i830_dvoices[] now */ - //kfree(dvo); - } - if (intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(connector); } -#ifdef RANDR_GET_CRTC_INTERFACE -static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; - int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); - - return intel_pipe_to_crtc(pScrn, pipe); -} -#endif - static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { .dpms = intel_dvo_dpms, .mode_fixup = intel_dvo_mode_fixup, @@ -317,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { static const struct drm_connector_funcs intel_dvo_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = intel_dvo_save, - .restore = intel_dvo_restore, .detect = intel_dvo_detect, .destroy = intel_dvo_destroy, .fill_modes = 
drm_helper_probe_single_connector_modes, @@ -327,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, - .best_encoder = intel_best_encoder, + .best_encoder = intel_attached_encoder, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + if (dvo) { + if (dvo->dev_ops->destroy) + dvo->dev_ops->destroy(dvo); + if (dvo->panel_fixed_mode) + kfree(dvo->panel_fixed_mode); + } + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static const struct drm_encoder_funcs intel_dvo_enc_funcs = { @@ -351,8 +310,9 @@ intel_dvo_get_current_mode (struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; uint32_t dvo_reg = dvo->dvo_reg; uint32_t dvo_val = I915_READ(dvo_reg); struct drm_display_mode *mode = NULL; @@ -382,24 +342,31 @@ intel_dvo_get_current_mode (struct drm_connector *connector) void intel_dvo_init(struct drm_device *dev) { - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; struct intel_dvo_device *dvo; struct i2c_adapter *i2cbus = NULL; int ret = 0; int i; int encoder_type = DRM_MODE_ENCODER_NONE; - intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); - if (!intel_output) + intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); + if (!intel_encoder) + return; + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_encoder); return; + } /* Set up the DDC bus */ - intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); - if (!intel_output->ddc_bus) + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); + if (!intel_encoder->ddc_bus) goto free_intel; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { - struct drm_connector *connector = &intel_output->base; + struct drm_connector *connector = &intel_connector->base; int gpio; dvo = &intel_dvo_devices[i]; @@ -434,11 +401,11 @@ void intel_dvo_init(struct drm_device *dev) if (!ret) continue; - intel_output->type = INTEL_OUTPUT_DVO; - intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->type = INTEL_OUTPUT_DVO; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: - intel_output->clone_mask = + intel_encoder->clone_mask = (1 << INTEL_DVO_TMDS_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); drm_connector_init(dev, connector, @@ -447,7 +414,7 @@ void intel_dvo_init(struct drm_device *dev) encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: - intel_output->clone_mask = + intel_encoder->clone_mask = (1 << INTEL_DVO_LVDS_CLONE_BIT); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, @@ -462,16 +429,16 @@ 
void intel_dvo_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - intel_output->dev_priv = dvo; - intel_output->i2c_bus = i2cbus; + intel_encoder->dev_priv = dvo; + intel_encoder->i2c_bus = i2cbus; - drm_encoder_init(dev, &intel_output->enc, + drm_encoder_init(dev, &intel_encoder->enc, &intel_dvo_enc_funcs, encoder_type); - drm_encoder_helper_add(&intel_output->enc, + drm_encoder_helper_add(&intel_encoder->enc, &intel_dvo_helper_funcs); - drm_mode_connector_attach_encoder(&intel_output->base, - &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able * to dig the fixed panel mode out of the BIOS data. @@ -489,10 +456,11 @@ void intel_dvo_init(struct drm_device *dev) return; } - intel_i2c_destroy(intel_output->ddc_bus); + intel_i2c_destroy(intel_encoder->ddc_bus); /* Didn't find a chip, so tear down. */ if (i2cbus != NULL) intel_i2c_destroy(i2cbus); free_intel: - kfree(intel_output); + kfree(intel_encoder); + kfree(intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 8cd791dc5b29..3e18c9e7729b 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -30,7 +30,6 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> -#include <linux/slab.h> #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/fb.h> @@ -45,9 +44,10 @@ #include "i915_drm.h" #include "i915_drv.h" -struct intelfb_par { +struct intel_fbdev { struct drm_fb_helper helper; - struct intel_framebuffer *intel_fb; + struct intel_framebuffer ifb; + struct list_head fbdev_list; struct drm_display_mode *our_mode; }; @@ -55,7 +55,6 @@ static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, @@ -64,62 +63,12 @@ static struct fb_ops intelfb_ops = { .fb_setcmap = drm_fb_helper_setcmap, }; -static struct drm_fb_helper_funcs intel_fb_helper_funcs = { - .gamma_set = intel_crtc_fb_gamma_set, - .gamma_get = intel_crtc_fb_gamma_get, -}; - - -/** - * Currently it is assumed that the old framebuffer is reused. - * - * LOCKING - * caller should hold the mode config lock. 
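/*
 * [editor's note] The intelfb_resize() helper being removed here (its
 * body follows just below) translated a DRM display mode into fbdev var
 * timings.  fbdev expresses the dot clock as a period in picoseconds,
 * which is why the removed code divided in stages to dodge 32-bit
 * overflow; with mode->clock in kHz the same value is KHZ2PICOS() from
 * <linux/fb.h>.  Sketch of the mapping, assuming a valid mode:
 */
static void mode_to_fb_var(const struct drm_display_mode *mode,
			   struct fb_var_screeninfo *var)
{
	var->xres          = mode->hdisplay;
	var->right_margin  = mode->hsync_start - mode->hdisplay;
	var->hsync_len     = mode->hsync_end - mode->hsync_start;
	var->left_margin   = mode->htotal - mode->hsync_end;
	var->yres          = mode->vdisplay;
	var->lower_margin  = mode->vsync_start - mode->vdisplay;
	var->vsync_len     = mode->vsync_end - mode->vsync_start;
	var->upper_margin  = mode->vtotal - mode->vsync_end;
	var->pixclock      = KHZ2PICOS(mode->clock);	/* period in ps */
}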
- * - */ -int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc) -{ - struct fb_info *info; - struct drm_framebuffer *fb; - struct drm_display_mode *mode = crtc->desired_mode; - - fb = crtc->fb; - if (!fb) - return 1; - - info = fb->fbdev; - if (!info) - return 1; - - if (!mode) - return 1; - - info->var.xres = mode->hdisplay; - info->var.right_margin = mode->hsync_start - mode->hdisplay; - info->var.hsync_len = mode->hsync_end - mode->hsync_start; - info->var.left_margin = mode->htotal - mode->hsync_end; - info->var.yres = mode->vdisplay; - info->var.lower_margin = mode->vsync_start - mode->vdisplay; - info->var.vsync_len = mode->vsync_end - mode->vsync_start; - info->var.upper_margin = mode->vtotal - mode->vsync_end; - info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; - /* avoid overflow */ - info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; - - return 0; -} -EXPORT_SYMBOL(intelfb_resize); - -static int intelfb_create(struct drm_device *dev, uint32_t fb_width, - uint32_t fb_height, uint32_t surface_width, - uint32_t surface_height, - uint32_t surface_depth, uint32_t surface_bpp, - struct drm_framebuffer **fb_p) +static int intelfb_create(struct intel_fbdev *ifbdev, + struct drm_fb_helper_surface_size *sizes) { + struct drm_device *dev = ifbdev->helper.dev; struct fb_info *info; - struct intelfb_par *par; struct drm_framebuffer *fb; - struct intel_framebuffer *intel_fb; struct drm_mode_fb_cmd mode_cmd; struct drm_gem_object *fbo = NULL; struct drm_i915_gem_object *obj_priv; @@ -127,76 +76,72 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; /* we don't do packed 24bpp */ - if (surface_bpp == 24) - surface_bpp = 32; + if (sizes->surface_bpp == 24) + sizes->surface_bpp = 32; - mode_cmd.width = surface_width; - mode_cmd.height = surface_height; + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; - mode_cmd.bpp = surface_bpp; + mode_cmd.bpp = sizes->surface_bpp; mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); - mode_cmd.depth = surface_depth; + mode_cmd.depth = sizes->surface_depth; size = mode_cmd.pitch * mode_cmd.height; size = ALIGN(size, PAGE_SIZE); - fbo = drm_gem_object_alloc(dev, size); + fbo = i915_gem_alloc_object(dev, size); if (!fbo) { DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } - obj_priv = fbo->driver_private; + obj_priv = to_intel_bo(fbo); mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(fbo, 64*1024); + ret = intel_pin_and_fence_fb_obj(dev, fbo); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; } /* Flush everything out, we'll be doing GTT only from now on */ - i915_gem_object_set_to_gtt_domain(fbo, 1); - - ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); + ret = i915_gem_object_set_to_gtt_domain(fbo, 1); if (ret) { - DRM_ERROR("failed to allocate fb.\n"); + DRM_ERROR("failed to bind fb: %d.\n", ret); goto out_unpin; } - list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); - - intel_fb = to_intel_framebuffer(fb); - *fb_p = fb; - - info = framebuffer_alloc(sizeof(struct intelfb_par), device); + info = framebuffer_alloc(0, device); if (!info) { ret = -ENOMEM; goto out_unpin; } - par = info->par; + info->par = ifbdev; - par->helper.funcs = &intel_fb_helper_funcs; - par->helper.dev = dev; - ret = drm_fb_helper_init_crtc_count(&par->helper, 2, - INTELFB_CONN_LIMIT); - if (ret) - goto out_unref; + intel_framebuffer_init(dev, 
&ifbdev->ifb, &mode_cmd, fbo); + + fb = &ifbdev->ifb.base; + + ifbdev->helper.fb = fb; + ifbdev->helper.fbdev = info; strcpy(info->fix.id, "inteldrmfb"); info->flags = FBINFO_DEFAULT; - info->fbops = &intelfb_ops; - /* setup aperture base/size for vesafb takeover */ - info->aperture_base = dev->mode_config.fb_base; + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto out_unpin; + } + info->apertures->ranges[0].base = dev->mode_config.fb_base; if (IS_I9XX(dev)) - info->aperture_size = pci_resource_len(dev->pdev, 2); + info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); else - info->aperture_size = pci_resource_len(dev->pdev, 0); + info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; info->fix.smem_len = size; @@ -209,12 +154,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, ret = -ENOSPC; goto out_unpin; } + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto out_unpin; + } info->screen_size = size; // memset(info->screen_base, 0, size); drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); - drm_fb_helper_fill_var(info, fb, fb_width, fb_height); + drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); /* FIXME: we really shouldn't expose mmio space at all */ info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); @@ -226,14 +177,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; - fb->fbdev = info; - - par->intel_fb = intel_fb; - - /* To allow resizeing without swapping buffers */ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", - intel_fb->base.width, intel_fb->base.height, - obj_priv->gtt_offset, fbo); + fb->width, fb->height, + obj_priv->gtt_offset, fbo); + mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); @@ -248,35 +195,92 @@ out: return ret; } -int intelfb_probe(struct drm_device *dev) +static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { + struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; + int new_fb = 0; int ret; - DRM_DEBUG_KMS("\n"); - ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); - return ret; + if (!helper->fb) { + ret = intelfb_create(ifbdev, sizes); + if (ret) + return ret; + new_fb = 1; + } + return new_fb; } -EXPORT_SYMBOL(intelfb_probe); -int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) +static struct drm_fb_helper_funcs intel_fb_helper_funcs = { + .gamma_set = intel_crtc_fb_gamma_set, + .gamma_get = intel_crtc_fb_gamma_get, + .fb_probe = intel_fb_find_or_create_single, +}; + +int intel_fbdev_destroy(struct drm_device *dev, + struct intel_fbdev *ifbdev) { struct fb_info *info; + struct intel_framebuffer *ifb = &ifbdev->ifb; - if (!fb) - return -EINVAL; - - info = fb->fbdev; - - if (info) { - struct intelfb_par *par = info->par; + if (ifbdev->helper.fbdev) { + info = ifbdev->helper.fbdev; unregister_framebuffer(info); iounmap(info->screen_base); - if (info->par) - drm_fb_helper_free(&par->helper); + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } + drm_fb_helper_fini(&ifbdev->helper); + + drm_framebuffer_cleanup(&ifb->base); + if (ifb->obj) + drm_gem_object_unreference(ifb->obj); + + return 0; +} + +int intel_fbdev_init(struct drm_device *dev) +{ + struct intel_fbdev *ifbdev; + 
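/*
 * [editor's note] intel_fb_find_or_create_single() above follows the
 * drm_fb_helper ->fb_probe convention visible in its new_fb variable:
 * return 1 when a new framebuffer was allocated (so the core registers
 * it), 0 when the existing one is reused across re-probes, and a
 * negative errno on failure.  Hedged sketch of that contract;
 * create_fb() is a hypothetical stand-in for intelfb_create():
 */
static int fb_probe_sketch(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	int ret;

	if (helper->fb)
		return 0;		/* keep the current fb */

	ret = create_fb(helper, sizes);	/* hypothetical helper */
	if (ret)
		return ret;		/* negative errno */
	return 1;			/* new fb: core must register it */
}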
drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); + if (!ifbdev) + return -ENOMEM; + + dev_priv->fbdev = ifbdev; + ifbdev->helper.funcs = &intel_fb_helper_funcs; + + ret = drm_fb_helper_init(dev, &ifbdev->helper, + dev_priv->num_pipe, + INTELFB_CONN_LIMIT); + if (ret) { + kfree(ifbdev); + return ret; + } + + drm_fb_helper_single_add_all_connectors(&ifbdev->helper); + drm_fb_helper_initial_config(&ifbdev->helper, 32); return 0; } -EXPORT_SYMBOL(intelfb_remove); + +void intel_fbdev_fini(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + if (!dev_priv->fbdev) + return; + + intel_fbdev_destroy(dev, dev_priv->fbdev); + kfree(dev_priv->fbdev); + dev_priv->fbdev = NULL; +} MODULE_LICENSE("GPL and additional rights"); + +void intel_fb_output_poll_changed(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); +} diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a30f8bfc1985..83bd764b000e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -27,6 +27,7 @@ */ #include <linux/i2c.h> +#include <linux/slab.h> #include <linux/delay.h> #include "drmP.h" #include "drm.h" @@ -38,7 +39,6 @@ struct intel_hdmi_priv { u32 sdvox_reg; - u32 save_SDVOX; bool has_hdmi_sink; }; @@ -50,8 +50,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; u32 sdvox; sdvox = SDVO_ENCODING_HDMI | @@ -59,11 +59,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; - if (hdmi_priv->has_hdmi_sink) + if (hdmi_priv->has_hdmi_sink) { sdvox |= SDVO_AUDIO_ENABLE; + if (HAS_PCH_CPT(dev)) + sdvox |= HDMI_MODE_SELECT; + } - if (intel_crtc->pipe == 1) - sdvox |= SDVO_PIPE_B_SELECT; + if (intel_crtc->pipe == 1) { + if (HAS_PCH_CPT(dev)) + sdvox |= PORT_TRANS_B_SEL_CPT; + else + sdvox |= SDVO_PIPE_B_SELECT; + } I915_WRITE(hdmi_priv->sdvox_reg, sdvox); POSTING_READ(hdmi_priv->sdvox_reg); @@ -73,8 +80,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; u32 temp; temp = I915_READ(hdmi_priv->sdvox_reg); @@ -105,27 +112,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) } } -static void intel_hdmi_save(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - - hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); -} - -static void intel_hdmi_restore(struct drm_connector *connector) -{ - struct drm_device *dev = 
connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - - I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); - POSTING_READ(hdmi_priv->sdvox_reg); -} - static int intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { @@ -150,21 +136,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; hdmi_priv->has_hdmi_sink = false; - edid = drm_get_edid(&intel_output->base, - intel_output->ddc_bus); + edid = drm_get_edid(connector, + intel_encoder->ddc_bus); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); } - intel_output->base.display_info.raw_edid = NULL; + connector->display_info.raw_edid = NULL; kfree(edid); } @@ -173,24 +160,21 @@ intel_hdmi_detect(struct drm_connector *connector) static int intel_hdmi_get_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); /* We should parse the EDID data and find out if it's an HDMI sink so * we can send audio to it. 
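/*
 * [editor's note] Condensed view of intel_hdmi_detect() above: fetch the
 * EDID over the DDC bus, treat a digital-input EDID as "connected", and
 * let drm_detect_hdmi_monitor() (which scans the CEA extension block)
 * decide HDMI vs. plain DVI so audio is only enabled for real HDMI
 * sinks.  A sketch, not the patched function itself:
 */
static enum drm_connector_status
hdmi_detect_sketch(struct drm_connector *connector,
		   struct i2c_adapter *ddc_bus, bool *has_hdmi_sink)
{
	struct edid *edid = drm_get_edid(connector, ddc_bus);
	enum drm_connector_status status = connector_status_disconnected;

	*has_hdmi_sink = false;
	if (edid) {
		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
			status = connector_status_connected;
			*has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		}
		kfree(edid);	/* drm_get_edid() result is caller-owned */
	}
	return status;
}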
*/ - return intel_ddc_get_modes(intel_output); + return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); } static void intel_hdmi_destroy(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - - if (intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(connector); } static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { @@ -203,8 +187,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = intel_hdmi_save, - .restore = intel_hdmi_restore, .detect = intel_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = intel_hdmi_destroy, @@ -213,12 +195,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, - .best_encoder = intel_best_encoder, + .best_encoder = intel_attached_encoder, }; static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { @@ -229,63 +216,72 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; struct intel_hdmi_priv *hdmi_priv; - intel_output = kcalloc(sizeof(struct intel_output) + + intel_encoder = kcalloc(sizeof(struct intel_encoder) + sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); - if (!intel_output) + if (!intel_encoder) return; - hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); - connector = &intel_output->base; + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_encoder); + return; + } + + hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); + + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); - intel_output->type = INTEL_OUTPUT_HDMI; + intel_encoder->type = INTEL_OUTPUT_HDMI; + connector->polled = DRM_CONNECTOR_POLL_HPD; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); /* Set up the DDC bus. 
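/*
 * [editor's note] Ownership after the split, as exercised by
 * intel_hdmi_init() below: a single allocation carries the encoder plus
 * its private data in the tail, and the encoder ->destroy tears down the
 * i2c/DDC buses it owns, while the connector is a separate allocation
 * freed from the connector ->destroy.  Error paths must unwind both.
 * Hedged sketch of the paired allocation (argument order normalized;
 * the patch itself passes kcalloc's size and count swapped):
 */
static int hdmi_alloc_sketch(struct intel_encoder **enc_out,
			     struct intel_connector **conn_out)
{
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;

	intel_encoder = kzalloc(sizeof(*intel_encoder) +
				sizeof(struct intel_hdmi_priv), GFP_KERNEL);
	if (!intel_encoder)
		return -ENOMEM;

	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_encoder);	/* unwind the first allocation */
		return -ENOMEM;
	}

	*enc_out = intel_encoder;
	*conn_out = intel_connector;
	return 0;
}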
*/ if (sdvox_reg == SDVOB) { - intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); + intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == SDVOC) { - intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); + intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIB) { - intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, + intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, "HDMIB"); dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIC) { - intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, + intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, "HDMIC"); dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMID) { - intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, + intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, "HDMID"); dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; } - if (!intel_output->ddc_bus) + if (!intel_encoder->ddc_bus) goto err_connector; hdmi_priv->sdvox_reg = sdvox_reg; - intel_output->dev_priv = hdmi_priv; + intel_encoder->dev_priv = hdmi_priv; - drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); - drm_mode_connector_attach_encoder(&intel_output->base, - &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); drm_sysfs_connector_add(connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written @@ -301,7 +297,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) err_connector: drm_connector_cleanup(connector); - kfree(intel_output); + kfree(intel_encoder); + kfree(intel_connector); return; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index fcc753ca5d94..c2649c7df14c 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -26,6 +26,7 @@ * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> +#include <linux/slab.h> #include <linux/i2c-id.h> #include <linux/i2c-algo-bit.h> #include "drmP.h" diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 14e516fdc2dd..0eab8df5bf7e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -30,6 +30,7 @@ #include <acpi/button.h> #include <linux/dmi.h> #include <linux/i2c.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" @@ -138,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) /* XXX: We never power 
down the LVDS pairs. */ } -static void intel_lvds_save(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; - u32 pwm_ctl_reg; - - if (HAS_PCH_SPLIT(dev)) { - pp_on_reg = PCH_PP_ON_DELAYS; - pp_off_reg = PCH_PP_OFF_DELAYS; - pp_ctl_reg = PCH_PP_CONTROL; - pp_div_reg = PCH_PP_DIVISOR; - pwm_ctl_reg = BLC_PWM_CPU_CTL; - } else { - pp_on_reg = PP_ON_DELAYS; - pp_off_reg = PP_OFF_DELAYS; - pp_ctl_reg = PP_CONTROL; - pp_div_reg = PP_DIVISOR; - pwm_ctl_reg = BLC_PWM_CTL; - } - - dev_priv->savePP_ON = I915_READ(pp_on_reg); - dev_priv->savePP_OFF = I915_READ(pp_off_reg); - dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg); - dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg); - dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg); - dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & - BACKLIGHT_DUTY_CYCLE_MASK); - - /* - * If the light is off at server startup, just make it full brightness - */ - if (dev_priv->backlight_duty_cycle == 0) - dev_priv->backlight_duty_cycle = - intel_lvds_get_max_backlight(dev); -} - -static void intel_lvds_restore(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; - u32 pwm_ctl_reg; - - if (HAS_PCH_SPLIT(dev)) { - pp_on_reg = PCH_PP_ON_DELAYS; - pp_off_reg = PCH_PP_OFF_DELAYS; - pp_ctl_reg = PCH_PP_CONTROL; - pp_div_reg = PCH_PP_DIVISOR; - pwm_ctl_reg = BLC_PWM_CPU_CTL; - } else { - pp_on_reg = PP_ON_DELAYS; - pp_off_reg = PP_OFF_DELAYS; - pp_ctl_reg = PP_CONTROL; - pp_div_reg = PP_DIVISOR; - pwm_ctl_reg = BLC_PWM_CTL; - } - - I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL); - I915_WRITE(pp_on_reg, dev_priv->savePP_ON); - I915_WRITE(pp_off_reg, dev_priv->savePP_OFF); - I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR); - I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL); - if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) - intel_lvds_set_power(dev, true); - else - intel_lvds_set_power(dev, false); -} - static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { @@ -238,8 +170,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; u32 pfit_control = 0, pfit_pgm_ratios = 0; int left_border = 0, right_border = 0, top_border = 0; int bottom_border = 0; @@ -586,8 +518,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; /* * The LVDS pin pair will already have been turned on in the @@ -607,53 +539,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); } -/* Some lid devices report incorrect lid status, assume they're 
connected */ -static const struct dmi_system_id bad_lid_status[] = { - { - .ident = "Compaq nx9020", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_BOARD_NAME, "3084"), - }, - }, - { - .ident = "Samsung SX20S", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), - DMI_MATCH(DMI_BOARD_NAME, "SX20S"), - }, - }, - { - .ident = "Aspire One", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), - }, - }, - { - .ident = "Aspire 1810T", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), - }, - }, - { - .ident = "PC-81005", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), - DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), - }, - }, - { - .ident = "Clevo M5x0N", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), - DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), - }, - }, - { } -}; - /** * Detect the LVDS connection. * @@ -669,12 +554,9 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect /* ACPI lid methods were generally unreliable in this generation, so * don't even bother. */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev) || IS_GEN3(dev)) return connector_status_connected; - if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) - status = connector_status_disconnected; - return status; } @@ -684,14 +566,17 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect static int intel_lvds_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; - ret = intel_ddc_get_modes(intel_output); + if (dev_priv->lvds_edid_good) { + ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - if (ret) - return ret; + if (ret) + return ret; + } /* Didn't get an EDID, so * Set wide sync ranges so we get all modes @@ -714,6 +599,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector) return 0; } +static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); + return 1; +} + +/* The GPU hangs up on these systems if modeset is performed on LID open */ +static const struct dmi_system_id intel_no_modeset_on_lid[] = { + { + .callback = intel_no_modeset_on_lid_dmi_callback, + .ident = "Toshiba Tecra A11", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), + }, + }, + + { } /* terminating entry */ +}; + /* * Lid events. 
Note the use of 'modeset_on_lid': * - we set it on lid close, and reset it on open @@ -737,6 +642,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, */ if (connector) connector->status = connector->funcs->detect(connector); + /* Don't force modeset on machines where it causes a GPU lockup */ + if (dmi_check_system(intel_no_modeset_on_lid)) + return NOTIFY_OK; if (!acpi_lid_open()) { dev_priv->modeset_on_lid = 1; return NOTIFY_OK; @@ -764,11 +672,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, static void intel_lvds_destroy(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); struct drm_i915_private *dev_priv = dev->dev_private; - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); if (dev_priv->lid_notifier.notifier_call) acpi_lid_notifier_unregister(&dev_priv->lid_notifier); drm_sysfs_connector_remove(connector); @@ -781,13 +686,14 @@ static int intel_lvds_set_property(struct drm_connector *connector, uint64_t value) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = - to_intel_output(connector); if (property == dev->mode_config.scaling_mode_property && connector->encoder) { struct drm_crtc *crtc = connector->encoder->crtc; - struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + struct drm_encoder *encoder = connector->encoder; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; + if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); return 0; @@ -821,13 +727,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, - .best_encoder = intel_best_encoder, + .best_encoder = intel_attached_encoder, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = intel_lvds_save, - .restore = intel_lvds_restore, .detect = intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_lvds_set_property, @@ -837,7 +741,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { static void intel_lvds_enc_destroy(struct drm_encoder *encoder) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static const struct drm_encoder_funcs intel_lvds_enc_funcs = { @@ -907,6 +816,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Clientron U800", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), + DMI_MATCH(DMI_PRODUCT_NAME, "U800"), + }, + }, { } /* terminating entry */ }; @@ -1017,7 +934,8 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) void intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ @@ -1045,43 +963,51 @@ void intel_lvds_init(struct drm_device 
*dev) gpio = PCH_GPIOC; } - intel_output = kzalloc(sizeof(struct intel_output) + + intel_encoder = kzalloc(sizeof(struct intel_encoder) + sizeof(struct intel_lvds_priv), GFP_KERNEL); - if (!intel_output) { + if (!intel_encoder) { return; } - connector = &intel_output->base; - encoder = &intel_output->enc; - drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_encoder); + return; + } + + connector = &intel_connector->base; + encoder = &intel_encoder->enc; + drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); - intel_output->type = INTEL_OUTPUT_LVDS; + drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + intel_encoder->type = INTEL_OUTPUT_LVDS; - intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); - intel_output->crtc_mask = (1 << 1); + intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); + intel_encoder->crtc_mask = (1 << 1); + if (IS_I965G(dev)) + intel_encoder->crtc_mask |= (1 << 0); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; - lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); - intel_output->dev_priv = lvds_priv; + lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); + intel_encoder->dev_priv = lvds_priv; /* create the scaling mode property */ drm_mode_create_scaling_mode_property(dev); /* * the initial panel fitting mode will be FULL_SCREEN. */ - drm_connector_attach_property(&intel_output->base, + drm_connector_attach_property(&intel_connector->base, dev->mode_config.scaling_mode_property, - DRM_MODE_SCALE_FULLSCREEN); - lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; + DRM_MODE_SCALE_ASPECT); + lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; /* * LVDS discovery: * 1) check for EDID on DDC @@ -1093,8 +1019,8 @@ void intel_lvds_init(struct drm_device *dev) */ /* Set up the DDC bus. */ - intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); - if (!intel_output->ddc_bus) { + intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); + if (!intel_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); goto failed; @@ -1104,7 +1030,10 @@ void intel_lvds_init(struct drm_device *dev) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. 
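 * (Editor's sketch, not part of the patch: the discovery loop that follows
 *  implements this by scanning connector->probed_modes for the first mode
 *  flagged DRM_MODE_TYPE_PREFERRED, roughly:
 *
 *	list_for_each_entry(scan, &connector->probed_modes, head) {
 *		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 *			dev_priv->panel_fixed_mode =
 *				drm_mode_duplicate(dev, scan);
 *			break;
 *		}
 *	}
 *
 *  duplicating it as the fixed panel mode used for all later modesets.)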
*/ - intel_ddc_get_modes(intel_output); + dev_priv->lvds_edid_good = true; + + if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) + dev_priv->lvds_edid_good = false; list_for_each_entry(scan, &connector->probed_modes, head) { mutex_lock(&dev->mode_config.mutex); @@ -1182,9 +1111,10 @@ out: failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); - kfree(intel_output); + kfree(intel_encoder); + kfree(intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 67e2f4632a24..4b1fd3d9c73c 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -23,6 +23,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/slab.h> #include <linux/i2c.h> #include <linux/fb.h> #include "drmP.h" @@ -33,7 +34,7 @@ * intel_ddc_probe * */ -bool intel_ddc_probe(struct intel_output *intel_output) +bool intel_ddc_probe(struct intel_encoder *intel_encoder) { u8 out_buf[] = { 0x0, 0x0}; u8 buf[2]; @@ -53,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) } }; - intel_i2c_quirk_set(intel_output->base.dev, true); - ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); - intel_i2c_quirk_set(intel_output->base.dev, false); + intel_i2c_quirk_set(intel_encoder->enc.dev, true); + ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); + intel_i2c_quirk_set(intel_encoder->enc.dev, false); if (ret == 2) return true; @@ -65,22 +66,23 @@ bool intel_ddc_probe(struct intel_output *intel_output) /** * intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use + * @adapter: i2c adapter * * Fetch the EDID information from @connector using the DDC bus. 
*/ -int intel_ddc_get_modes(struct intel_output *intel_output) +int intel_ddc_get_modes(struct drm_connector *connector, + struct i2c_adapter *adapter) { struct edid *edid; int ret = 0; - intel_i2c_quirk_set(intel_output->base.dev, true); - edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); - intel_i2c_quirk_set(intel_output->base.dev, false); + intel_i2c_quirk_set(connector->dev, true); + edid = drm_get_edid(connector, adapter); + intel_i2c_quirk_set(connector->dev, false); if (edid) { - drm_mode_connector_update_edid_property(&intel_output->base, - edid); - ret = drm_add_edid_modes(&intel_output->base, edid); - intel_output->base.display_info.raw_edid = NULL; + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + connector->display_info.raw_edid = NULL; kfree(edid); } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d355d1d527e7..d7ad5139d17c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; int ret; - RING_LOCALS; + drm_i915_private_t *dev_priv = dev->dev_private; BUG_ON(overlay->active); @@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = i915_add_request(dev, NULL, 0); + overlay->last_flip_req = + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); + ret = i915_do_wait_request(dev, + overlay->last_flip_req, 1, &dev_priv->render_ring); if (ret != 0) return ret; @@ -248,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay, drm_i915_private_t *dev_priv = dev->dev_private; u32 flip_addr = overlay->flip_addr; u32 tmp; - RING_LOCALS; BUG_ON(!overlay->active); @@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay, OUT_RING(flip_addr); ADVANCE_LP_RING(); - overlay->last_flip_req = i915_add_request(dev, NULL, 0); + overlay->last_flip_req = + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); } static int intel_overlay_wait_flip(struct intel_overlay *overlay) @@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) drm_i915_private_t *dev_priv = dev->dev_private; int ret; u32 tmp; - RING_LOCALS; if (overlay->last_flip_req != 0) { - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); + ret = i915_do_wait_request(dev, overlay->last_flip_req, + 1, &dev_priv->render_ring); if (ret == 0) { overlay->last_flip_req = 0; @@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = i915_add_request(dev, NULL, 0); + overlay->last_flip_req = + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); + ret = i915_do_wait_request(dev, overlay->last_flip_req, + 1, &dev_priv->render_ring); if (ret != 0) return ret; @@ -314,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay) { u32 flip_addr = overlay->flip_addr; struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = 
dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; int ret; - RING_LOCALS; BUG_ON(!overlay->active); @@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = i915_add_request(dev, NULL, 0); + overlay->last_flip_req = + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); + ret = i915_do_wait_request(dev, overlay->last_flip_req, + 1, &dev_priv->render_ring); if (ret != 0) return ret; @@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = i915_add_request(dev, NULL, 0); + overlay->last_flip_req = + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); + ret = i915_do_wait_request(dev, overlay->last_flip_req, + 1, &dev_priv->render_ring); if (ret != 0) return ret; @@ -373,7 +379,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* never have the overlay hw on without showing a frame */ BUG_ON(!overlay->vid_bo); - obj = overlay->vid_bo->obj; + obj = &overlay->vid_bo->base; i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); @@ -390,28 +396,29 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, int interruptible) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; + drm_i915_private_t *dev_priv = dev->dev_private; u32 flip_addr; int ret; - RING_LOCALS; if (overlay->hw_wedged == HW_WEDGED) return -EIO; if (overlay->last_flip_req == 0) { - overlay->last_flip_req = i915_add_request(dev, NULL, 0); + overlay->last_flip_req = + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; } - ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); + ret = i915_do_wait_request(dev, overlay->last_flip_req, + interruptible, &dev_priv->render_ring); if (ret != 0) return ret; switch (overlay->hw_wedged) { case RELEASE_OLD_VID: - obj = overlay->old_vid_bo->obj; + obj = &overlay->old_vid_bo->base; i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); overlay->old_vid_bo = NULL; @@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = i915_add_request(dev, NULL, 0); + overlay->last_flip_req = i915_add_request(dev, NULL, + 0, &dev_priv->render_ring); if (overlay->last_flip_req == 0) return -ENOMEM; ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible); + interruptible, &dev_priv->render_ring); if (ret != 0) return ret; @@ -467,7 +475,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) if (ret != 0) return ret; - obj = overlay->old_vid_bo->obj; + obj = &overlay->old_vid_bo->base; i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); overlay->old_vid_bo = NULL; @@ -724,7 +732,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, int ret, tmp_width; struct overlay_registers *regs; bool scale_changed = false; - struct drm_i915_gem_object *bo_priv = new_bo->driver_private; + struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); struct drm_device *dev = overlay->dev; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -809,7 +817,7 @@ int 
intel_overlay_do_put_image(struct intel_overlay *overlay, intel_overlay_continue(overlay, scale_changed); overlay->old_vid_bo = overlay->vid_bo; - overlay->vid_bo = new_bo->driver_private; + overlay->vid_bo = to_intel_bo(new_bo); return 0; @@ -1068,14 +1076,18 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, DRM_MODE_OBJECT_CRTC); - if (!drmmode_obj) - return -ENOENT; + if (!drmmode_obj) { + ret = -ENOENT; + goto out_free; + } crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); new_bo = drm_gem_object_lookup(dev, file_priv, put_image_rec->bo_handle); - if (!new_bo) - return -ENOENT; + if (!new_bo) { + ret = -ENOENT; + goto out_free; + } mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); @@ -1165,6 +1177,7 @@ out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); drm_gem_object_unreference_unlocked(new_bo); +out_free: kfree(params); return ret; @@ -1336,10 +1349,10 @@ void intel_setup_overlay(struct drm_device *dev) return; overlay->dev = dev; - reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); + reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); if (!reg_bo) goto out_free; - overlay->reg_bo = reg_bo->driver_private; + overlay->reg_bo = to_intel_bo(reg_bo); if (OVERLAY_NONPHYSICAL(dev)) { ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c new file mode 100644 index 000000000000..26362f8495a8 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -0,0 +1,851 @@ +/* + * Copyright © 2008-2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt <eric@anholt.net> + * Zou Nan hai <nanhai.zou@intel.com> + * Xiang Hai hao <haihao.xiang@intel.com> + * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drv.h" +#include "i915_drm.h" +#include "i915_trace.h" + +static void +render_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + u32 cmd; + +#if WATCH_EXEC + DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, + invalidate_domains, flush_domains); +#endif + trace_i915_gem_request_flush(dev, ring->next_seqno, + invalidate_domains, flush_domains); + + if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { + /* + * read/write caches: + * + * I915_GEM_DOMAIN_RENDER is always invalidated, but is + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is + * also flushed at 2d versus 3d pipeline switches. + * + * read-only caches: + * + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if + * MI_READ_FLUSH is set, and is always flushed on 965. + * + * I915_GEM_DOMAIN_COMMAND may not exist? + * + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is + * invalidated when MI_EXE_FLUSH is set. + * + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is + * invalidated with every MI_FLUSH. + * + * TLBs: + * + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND + * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER + * are flushed at any MI_FLUSH. + */ + + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; + if ((invalidate_domains|flush_domains) & + I915_GEM_DOMAIN_RENDER) + cmd &= ~MI_NO_WRITE_FLUSH; + if (!IS_I965G(dev)) { + /* + * On the 965, the sampler cache always gets flushed + * and this bit is reserved. + */ + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) + cmd |= MI_READ_FLUSH; + } + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) + cmd |= MI_EXE_FLUSH; + +#if WATCH_EXEC + DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); +#endif + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, cmd); + intel_ring_emit(dev, ring, MI_NOOP); + intel_ring_advance(dev, ring); + } +} + +static unsigned int render_ring_get_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(PRB0_HEAD) & HEAD_ADDR; +} + +static unsigned int render_ring_get_tail(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(PRB0_TAIL) & TAIL_ADDR; +} + +static unsigned int render_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; + + return I915_READ(acthd_reg); +} + +static void render_ring_advance_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + I915_WRITE(PRB0_TAIL, ring->tail); +} + +static int init_ring_common(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + u32 head; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + obj_priv = to_intel_bo(ring->gem_object); + + /* Stop the ring if it's running. */ + I915_WRITE(ring->regs.ctl, 0); + I915_WRITE(ring->regs.head, 0); + I915_WRITE(ring->regs.tail, 0); + + /* Initialize the ring.
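+ * (Editor's note: initialization programs START with the GTT offset of
+ *  the ring object and then arms CTL with the buffer length and
+ *  RING_VALID; free space in the ring is later computed as
+ *
+ *	ring->space = ring->head - (ring->tail + 8);
+ *	if (ring->space < 0)
+ *		ring->space += ring->size;
+ *
+ *  matching the calculation at the end of this function.)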
*/ + I915_WRITE(ring->regs.start, obj_priv->gtt_offset); + head = ring->get_head(dev, ring); + + /* G45 ring initialization fails to reset head to zero */ + if (head != 0) { + DRM_ERROR("%s head not reset to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ(ring->regs.ctl), + I915_READ(ring->regs.head), + I915_READ(ring->regs.tail), + I915_READ(ring->regs.start)); + + I915_WRITE(ring->regs.head, 0); + + DRM_ERROR("%s head forced to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ(ring->regs.ctl), + I915_READ(ring->regs.head), + I915_READ(ring->regs.tail), + I915_READ(ring->regs.start)); + } + + I915_WRITE(ring->regs.ctl, + ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) + | RING_NO_REPORT | RING_VALID); + + head = I915_READ(ring->regs.head) & HEAD_ADDR; + /* If the head is still not zero, the ring is dead */ + if (head != 0) { + DRM_ERROR("%s initialization failed " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ(ring->regs.ctl), + I915_READ(ring->regs.head), + I915_READ(ring->regs.tail), + I915_READ(ring->regs.start)); + return -EIO; + } + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_kernel_lost_context(dev); + else { + ring->head = ring->get_head(dev, ring); + ring->tail = ring->get_tail(dev, ring); + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->size; + } + return 0; +} + +static int init_render_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret = init_ring_common(dev, ring); + if (IS_I9XX(dev) && !IS_GEN3(dev)) { + I915_WRITE(MI_MODE, + (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); + } + return ret; +} + +#define PIPE_CONTROL_FLUSH(addr) \ +do { \ + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ + PIPE_CONTROL_DEPTH_STALL | 2); \ + OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ + OUT_RING(0); \ + OUT_RING(0); \ +} while (0) + +/** + * Creates a new sequence number, emitting a write of it to the status page + * plus an interrupt, which will trigger i915_user_interrupt_handler. + * + * Must be called with struct_lock held. + * + * Returned sequence numbers are nonzero on success. + */ +static u32 +render_ring_add_request(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_file *file_priv, + u32 flush_domains) +{ + u32 seqno; + drm_i915_private_t *dev_priv = dev->dev_private; + seqno = intel_ring_get_seqno(dev, ring); + + if (IS_GEN6(dev)) { + BEGIN_LP_RING(6); + OUT_RING(GFX_OP_PIPE_CONTROL | 3); + OUT_RING(PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | + PIPE_CONTROL_NOTIFY); + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + OUT_RING(seqno); + OUT_RING(0); + OUT_RING(0); + ADVANCE_LP_RING(); + } else if (HAS_PIPE_CONTROL(dev)) { + u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; + + /* + * Workaround qword write incoherence by flushing the + * PIPE_NOTIFY buffers out to memory before requesting + * an interrupt. 
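+ * (Editor's note: each PIPE_CONTROL_FLUSH() below advances scratch_addr
+ *  by 128 bytes so every qword write lands in a separate cacheline; the
+ *  macro defined above expands to a four-dword packet:
+ *
+ *	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ *		 PIPE_CONTROL_DEPTH_STALL | 2);
+ *	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);
+ *	OUT_RING(0);
+ *	OUT_RING(0);
+ *
+ *  so the six flushes push the PIPE_NOTIFY write out to memory before
+ *  the interrupt is requested.)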
+ */ + BEGIN_LP_RING(32); + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + OUT_RING(seqno); + OUT_RING(0); + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + OUT_RING(seqno); + OUT_RING(0); + ADVANCE_LP_RING(); + } else { + BEGIN_LP_RING(4); + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(seqno); + + OUT_RING(MI_USER_INTERRUPT); + ADVANCE_LP_RING(); + } + return seqno; +} + +static u32 +render_ring_get_gem_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + if (HAS_PIPE_CONTROL(dev)) + return ((volatile u32 *)(dev_priv->seqno_page))[0]; + else + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static void +render_ring_get_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { + if (HAS_PCH_SPLIT(dev)) + ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + else + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + } + spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); +} + +static void +render_ring_put_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); + if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { + if (HAS_PCH_SPLIT(dev)) + ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + else + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + } + spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); +} + +static void render_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + if (IS_GEN6(dev)) { + I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); + I915_READ(HWS_PGA_GEN6); /* posting read */ + } else { + I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); + I915_READ(HWS_PGA); /* posting read */ + } + +} + +void +bsd_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, MI_FLUSH); + intel_ring_emit(dev, ring, MI_NOOP); + intel_ring_advance(dev, ring); +} + +static inline unsigned int bsd_ring_get_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; +} + +static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = 
dev->dev_private; + return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; +} + +static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(BSD_RING_ACTHD); +} + +static inline void bsd_ring_advance_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + I915_WRITE(BSD_RING_TAIL, ring->tail); +} + +static int init_bsd_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + return init_ring_common(dev, ring); +} + +static u32 +bsd_ring_add_request(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_file *file_priv, + u32 flush_domains) +{ + u32 seqno; + seqno = intel_ring_get_seqno(dev, ring); + intel_ring_begin(dev, ring, 4); + intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(dev, ring, + I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(dev, ring, seqno); + intel_ring_emit(dev, ring, MI_USER_INTERRUPT); + intel_ring_advance(dev, ring); + + DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); + + return seqno; +} + +static void bsd_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); + I915_READ(BSD_HWS_PGA); +} + +static void +bsd_ring_get_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} +static void +bsd_ring_put_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} + +static u32 +bsd_ring_get_gem_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static int +bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) +{ + uint32_t exec_start; + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | + (2 << 6) | MI_BATCH_NON_SECURE_I965); + intel_ring_emit(dev, ring, exec_start); + intel_ring_advance(dev, ring); + return 0; +} + + +static int +render_ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int nbox = exec->num_cliprects; + int i = 0, count; + uint32_t exec_start, exec_len; + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + exec_len = (uint32_t) exec->batch_len; + + trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); + + count = nbox ? 
nbox : 1; + + for (i = 0; i < count; i++) { + if (i < nbox) { + int ret = i915_emit_box(dev, cliprects, i, + exec->DR1, exec->DR4); + if (ret) + return ret; + } + + if (IS_I830(dev) || IS_845G(dev)) { + intel_ring_begin(dev, ring, 4); + intel_ring_emit(dev, ring, MI_BATCH_BUFFER); + intel_ring_emit(dev, ring, + exec_start | MI_BATCH_NON_SECURE); + intel_ring_emit(dev, ring, exec_start + exec_len - 4); + intel_ring_emit(dev, ring, 0); + } else { + intel_ring_begin(dev, ring, 4); + if (IS_I965G(dev)) { + intel_ring_emit(dev, ring, + MI_BATCH_BUFFER_START | (2 << 6) + | MI_BATCH_NON_SECURE_I965); + intel_ring_emit(dev, ring, exec_start); + } else { + intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START + | (2 << 6)); + intel_ring_emit(dev, ring, exec_start | + MI_BATCH_NON_SECURE); + } + } + intel_ring_advance(dev, ring); + } + + /* XXX breadcrumb */ + return 0; +} + +static void cleanup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + obj = ring->status_page.obj; + if (obj == NULL) + return; + obj_priv = to_intel_bo(obj); + + kunmap(obj_priv->pages[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + ring->status_page.obj = NULL; + + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); +} + +static int init_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + obj = i915_gem_alloc_object(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate status page\n"); + ret = -ENOMEM; + goto err; + } + obj_priv = to_intel_bo(obj); + obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + + ret = i915_gem_object_pin(obj, 4096); + if (ret != 0) { + goto err_unref; + } + + ring->status_page.gfx_addr = obj_priv->gtt_offset; + ring->status_page.page_addr = kmap(obj_priv->pages[0]); + if (ring->status_page.page_addr == NULL) { + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + goto err_unpin; + } + ring->status_page.obj = obj; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + + ring->setup_status_page(dev, ring); + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", + ring->name, ring->status_page.gfx_addr); + + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(obj); +err: + return ret; +} + + +int intel_init_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + int ret; + struct drm_i915_gem_object *obj_priv; + struct drm_gem_object *obj; + ring->dev = dev; + + if (I915_NEED_GFX_HWS(dev)) { + ret = init_status_page(dev, ring); + if (ret) + return ret; + } + + obj = i915_gem_alloc_object(dev, ring->size); + if (obj == NULL) { + DRM_ERROR("Failed to allocate ringbuffer\n"); + ret = -ENOMEM; + goto cleanup; + } + + ring->gem_object = obj; + + ret = i915_gem_object_pin(obj, ring->alignment); + if (ret != 0) { + drm_gem_object_unreference(obj); + goto cleanup; + } + + obj_priv = to_intel_bo(obj); + ring->map.size = ring->size; + ring->map.offset = dev->agp->base + obj_priv->gtt_offset; + ring->map.type = 0; + ring->map.flags = 0; + ring->map.mtrr = 0; + + drm_core_ioremap_wc(&ring->map, dev); + if (ring->map.handle == NULL) { + DRM_ERROR("Failed to map ringbuffer.\n"); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + ret = -EINVAL; + goto cleanup; + } + + ring->virtual_start = ring->map.handle; + ret = 
ring->init(dev, ring); + if (ret != 0) { + intel_cleanup_ring_buffer(dev, ring); + return ret; + } + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_kernel_lost_context(dev); + else { + ring->head = ring->get_head(dev, ring); + ring->tail = ring->get_tail(dev, ring); + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->size; + } + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + return ret; +cleanup: + cleanup_status_page(dev, ring); + return ret; +} + +void intel_cleanup_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + if (ring->gem_object == NULL) + return; + + drm_core_ioremapfree(&ring->map, dev); + + i915_gem_object_unpin(ring->gem_object); + drm_gem_object_unreference(ring->gem_object); + ring->gem_object = NULL; + cleanup_status_page(dev, ring); +} + +int intel_wrap_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + unsigned int *virt; + int rem; + rem = ring->size - ring->tail; + + if (ring->space < rem) { + int ret = intel_wait_ring_buffer(dev, ring, rem); + if (ret) + return ret; + } + + virt = (unsigned int *)(ring->virtual_start + ring->tail); + rem /= 4; + while (rem--) + *virt++ = MI_NOOP; + + ring->tail = 0; + ring->space = ring->head - 8; + + return 0; +} + +int intel_wait_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring, int n) +{ + unsigned long end; + + trace_i915_ring_wait_begin (dev); + end = jiffies + 3 * HZ; + do { + ring->head = ring->get_head(dev, ring); + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->size; + if (ring->space >= n) { + trace_i915_ring_wait_end (dev); + return 0; + } + + if (dev->primary->master) { + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + if (master_priv->sarea_priv) + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + } + + yield(); + } while (!time_after(jiffies, end)); + trace_i915_ring_wait_end (dev); + return -EBUSY; +} + +void intel_ring_begin(struct drm_device *dev, + struct intel_ring_buffer *ring, int num_dwords) +{ + int n = 4*num_dwords; + if (unlikely(ring->tail + n > ring->size)) + intel_wrap_ring_buffer(dev, ring); + if (unlikely(ring->space < n)) + intel_wait_ring_buffer(dev, ring, n); +} + +void intel_ring_emit(struct drm_device *dev, + struct intel_ring_buffer *ring, unsigned int data) +{ + unsigned int *virt = ring->virtual_start + ring->tail; + *virt = data; + ring->tail += 4; + ring->tail &= ring->size - 1; + ring->space -= 4; +} + +void intel_ring_advance(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + ring->advance_ring(dev, ring); +} + +void intel_fill_struct(struct drm_device *dev, + struct intel_ring_buffer *ring, + void *data, + unsigned int len) +{ + unsigned int *virt = ring->virtual_start + ring->tail; + BUG_ON((len&~(4-1)) != 0); + intel_ring_begin(dev, ring, len/4); + memcpy(virt, data, len); + ring->tail += len; + ring->tail &= ring->size - 1; + ring->space -= len; + intel_ring_advance(dev, ring); +} + +u32 intel_ring_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + u32 seqno; + seqno = ring->next_seqno; + + /* reserve 0 for non-seqno */ + if (++ring->next_seqno == 0) + ring->next_seqno = 1; + return seqno; +} + +struct intel_ring_buffer render_ring = { + .name = "render ring", + .regs = { + .ctl = PRB0_CTL, + .head = PRB0_HEAD, + .tail = PRB0_TAIL, + .start = PRB0_START + }, + .ring_flag = I915_EXEC_RENDER, + .size = 32 * PAGE_SIZE, + 
.alignment = PAGE_SIZE, + .virtual_start = NULL, + .dev = NULL, + .gem_object = NULL, + .head = 0, + .tail = 0, + .space = 0, + .next_seqno = 1, + .user_irq_refcount = 0, + .irq_gem_seqno = 0, + .waiting_gem_seqno = 0, + .setup_status_page = render_setup_status_page, + .init = init_render_ring, + .get_head = render_ring_get_head, + .get_tail = render_ring_get_tail, + .get_active_head = render_ring_get_active_head, + .advance_ring = render_ring_advance_ring, + .flush = render_ring_flush, + .add_request = render_ring_add_request, + .get_gem_seqno = render_ring_get_gem_seqno, + .user_irq_get = render_ring_get_user_irq, + .user_irq_put = render_ring_put_user_irq, + .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, + .status_page = {NULL, 0, NULL}, + .map = {0,} +}; + +/* ring buffer for bit-stream decoder */ + +struct intel_ring_buffer bsd_ring = { + .name = "bsd ring", + .regs = { + .ctl = BSD_RING_CTL, + .head = BSD_RING_HEAD, + .tail = BSD_RING_TAIL, + .start = BSD_RING_START + }, + .ring_flag = I915_EXEC_BSD, + .size = 32 * PAGE_SIZE, + .alignment = PAGE_SIZE, + .virtual_start = NULL, + .dev = NULL, + .gem_object = NULL, + .head = 0, + .tail = 0, + .space = 0, + .next_seqno = 1, + .user_irq_refcount = 0, + .irq_gem_seqno = 0, + .waiting_gem_seqno = 0, + .setup_status_page = bsd_setup_status_page, + .init = init_bsd_ring, + .get_head = bsd_ring_get_head, + .get_tail = bsd_ring_get_tail, + .get_active_head = bsd_ring_get_active_head, + .advance_ring = bsd_ring_advance_ring, + .flush = bsd_ring_flush, + .add_request = bsd_ring_add_request, + .get_gem_seqno = bsd_ring_get_gem_seqno, + .user_irq_get = bsd_ring_get_user_irq, + .user_irq_put = bsd_ring_put_user_irq, + .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, + .status_page = {NULL, 0, NULL}, + .map = {0,} +}; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h new file mode 100644 index 000000000000..d5568d3766de --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -0,0 +1,124 @@ +#ifndef _INTEL_RINGBUFFER_H_ +#define _INTEL_RINGBUFFER_H_ + +struct intel_hw_status_page { + void *page_addr; + unsigned int gfx_addr; + struct drm_gem_object *obj; +}; + +struct drm_i915_gem_execbuffer2; +struct intel_ring_buffer { + const char *name; + struct ring_regs { + u32 ctl; + u32 head; + u32 tail; + u32 start; + } regs; + unsigned int ring_flag; + unsigned long size; + unsigned int alignment; + void *virtual_start; + struct drm_device *dev; + struct drm_gem_object *gem_object; + + unsigned int head; + unsigned int tail; + unsigned int space; + u32 next_seqno; + struct intel_hw_status_page status_page; + + u32 irq_gem_seqno; /* last seq seen at irq time */ + u32 waiting_gem_seqno; + int user_irq_refcount; + void (*user_irq_get)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*user_irq_put)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*setup_status_page)(struct drm_device *dev, + struct intel_ring_buffer *ring); + + int (*init)(struct drm_device *dev, + struct intel_ring_buffer *ring); + + unsigned int (*get_head)(struct drm_device *dev, + struct intel_ring_buffer *ring); + unsigned int (*get_tail)(struct drm_device *dev, + struct intel_ring_buffer *ring); + unsigned int (*get_active_head)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*advance_ring)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*flush)(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32
flush_domains); + u32 (*add_request)(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_file *file_priv, + u32 flush_domains); + u32 (*get_gem_seqno)(struct drm_device *dev, + struct intel_ring_buffer *ring); + int (*dispatch_gem_execbuffer)(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset); + + /** + * List of objects currently involved in rendering from the + * ringbuffer. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. + */ + struct list_head active_list; + + /** + * List of breadcrumbs associated with GPU requests currently + * outstanding. + */ + struct list_head request_list; + + wait_queue_head_t irq_queue; + drm_local_map_t map; +}; + +static inline u32 +intel_read_status_page(struct intel_ring_buffer *ring, + int reg) +{ + u32 *regs = ring->status_page.page_addr; + return regs[reg]; +} + +int intel_init_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring); +void intel_cleanup_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring); +int intel_wait_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring, int n); +int intel_wrap_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring); +void intel_ring_begin(struct drm_device *dev, + struct intel_ring_buffer *ring, int n); +void intel_ring_emit(struct drm_device *dev, + struct intel_ring_buffer *ring, u32 data); +void intel_fill_struct(struct drm_device *dev, + struct intel_ring_buffer *ring, + void *data, + unsigned int len); +void intel_ring_advance(struct drm_device *dev, + struct intel_ring_buffer *ring); + +u32 intel_ring_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring); + +extern struct intel_ring_buffer render_ring; +extern struct intel_ring_buffer bsd_ring; + +#endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 48daee5c9c63..03c231be2273 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -26,6 +26,7 @@ * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> +#include <linux/slab.h> #include <linux/delay.h> #include "drmP.h" #include "drm.h" @@ -35,7 +36,18 @@ #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" -#include <linux/dmi.h> + +#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) +#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) +#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) +#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) + +#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ + SDVO_TV_MASK) + +#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) +#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) + static char *tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", @@ -53,7 +65,7 @@ struct intel_sdvo_priv { u8 slave_addr; /* Register for the SDVO device: SDVOB or SDVOC */ - int output_device; + int sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; @@ -85,12 +97,6 @@ struct intel_sdvo_priv { /* This is for current tv format name */ char *tv_format_name; - /* This contains all current supported TV format */ - char *tv_format_supported[TV_FORMAT_NUM]; - int 
format_supported_num; - struct drm_property *tv_format_property; - struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; - /** * This is set if we treat the device as HDMI, instead of DVI. */ @@ -111,29 +117,36 @@ struct intel_sdvo_priv { */ struct drm_display_mode *sdvo_lvds_fixed_mode; - /** - * Returned SDTV resolutions allowed for the current format, if the - * device reported it. - */ - struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; - /* * supported encoding mode, used to determine whether HDMI is * supported */ struct intel_sdvo_encode encode; - /* DDC bus used by this SDVO output */ + /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; /* Mac mini hack -- use the same DDC as the analog connector */ struct i2c_adapter *analog_ddc_bus; - int save_sdvo_mult; - u16 save_active_outputs; - struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; - struct intel_sdvo_dtd save_output_dtd[16]; - u32 save_SDVOX; +}; + +struct intel_sdvo_connector { + /* Mark the type of connector */ + uint16_t output_flag; + + /* This contains all current supported TV format */ + char *tv_format_supported[TV_FORMAT_NUM]; + int format_supported_num; + struct drm_property *tv_format_property; + struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; + + /** + * Returned SDTV resolutions allowed for the current format, if the + * device reported it. + */ + struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; + /* add the property for the SDVO-TV */ struct drm_property *left_property; struct drm_property *right_property; @@ -161,22 +174,33 @@ struct intel_sdvo_priv { }; static bool -intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); +intel_sdvo_output_setup(struct intel_encoder *intel_encoder, + uint16_t flags); +static void +intel_sdvo_tv_create_property(struct drm_connector *connector, int type); +static void +intel_sdvo_create_enhance_property(struct drm_connector *connector); /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). 
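 * (Editor's note: the PCH path added by this patch is exempt from the
 *  workaround; for PCH_SDVOB the register is written once and read back
 *  to post the write:
 *
 *	I915_WRITE(sdvo_priv->sdvo_reg, val);
 *	I915_READ(sdvo_priv->sdvo_reg);
 *
 *  only the non-PCH SDVOB/SDVOC pair takes the dual-write path below.)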
*/ -static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) +static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) { - struct drm_device *dev = intel_output->base.dev; + struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u32 bval = val, cval = val; int i; - if (sdvo_priv->output_device == SDVOB) { + if (sdvo_priv->sdvo_reg == PCH_SDVOB) { + I915_WRITE(sdvo_priv->sdvo_reg, val); + I915_READ(sdvo_priv->sdvo_reg); + return; + } + + if (sdvo_priv->sdvo_reg == SDVOB) { cval = I915_READ(SDVOC); } else { bval = I915_READ(SDVOB); @@ -195,10 +219,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) } } -static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, +static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, u8 *ch) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u8 out_buf[2]; u8 buf[2]; int ret; @@ -221,7 +245,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, out_buf[0] = addr; out_buf[1] = 0; - if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) + if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) { *ch = buf[0]; return true; @@ -231,10 +255,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, return false; } -static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, +static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, u8 ch) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u8 out_buf[2]; struct i2c_msg msgs[] = { { @@ -248,7 +272,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) + if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) { return true; } @@ -352,13 +376,14 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; -#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") -#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) +#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) +#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? 
"SDVOB" : "SDVOC") +#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) -static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, +static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, void *args, int args_len) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int i; DRM_DEBUG_KMS("%s: W: %02X ", @@ -367,30 +392,30 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); for (; i < 8; i++) DRM_LOG_KMS(" "); - for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { + for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { if (cmd == sdvo_cmd_names[i].cmd) { DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); break; } } - if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) + if (i == ARRAY_SIZE(sdvo_cmd_names)) DRM_LOG_KMS("(%02X)", cmd); DRM_LOG_KMS("\n"); } -static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, +static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, void *args, int args_len) { int i; - intel_sdvo_debug_write(intel_output, cmd, args, args_len); + intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); for (i = 0; i < args_len; i++) { - intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, + intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, ((u8*)args)[i]); } - intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); + intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); } static const char *cmd_status_names[] = { @@ -403,11 +428,11 @@ static const char *cmd_status_names[] = { "Scaling not supported" }; -static void intel_sdvo_debug_response(struct intel_output *intel_output, +static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, void *response, int response_len, u8 status) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int i; DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); @@ -422,7 +447,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, DRM_LOG_KMS("\n"); } -static u8 intel_sdvo_read_response(struct intel_output *intel_output, +static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, void *response, int response_len) { int i; @@ -432,16 +457,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output, while (retry--) { /* Read the command response */ for (i = 0; i < response_len; i++) { - intel_sdvo_read_byte(intel_output, + intel_sdvo_read_byte(intel_encoder, SDVO_I2C_RETURN_0 + i, &((u8 *)response)[i]); } /* read the return status */ - intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, + intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, &status); - intel_sdvo_debug_response(intel_output, response, response_len, + intel_sdvo_debug_response(intel_encoder, response, response_len, status); if (status != SDVO_CMD_STATUS_PENDING) return status; @@ -469,10 +494,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) * another I2C transaction after issuing the DDC bus switch, it will be * switched to the internal SDVO register. 
*/ -static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, +static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, u8 target) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u8 out_buf[2], cmd_buf[2], ret_value[2], ret; struct i2c_msg msgs[] = { { @@ -496,10 +521,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, }, }; - intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, + intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); /* write the DDC switch command argument */ - intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); + intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); out_buf[0] = SDVO_I2C_OPCODE; out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; @@ -508,7 +533,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, ret_value[0] = 0; ret_value[1] = 0; - ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); + ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); if (ret != 3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); @@ -522,7 +547,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, return; } -static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) +static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) { struct intel_sdvo_set_target_input_args targets = {0}; u8 status; @@ -533,10 +558,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool if (target_1) targets.target_1 = 1; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, sizeof(targets)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } @@ -547,13 +572,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool * This function is making an assumption about the layout of the response, * which should be checked against the docs. 
*/ -static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) +static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) { struct intel_sdvo_get_trained_inputs_response response; u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -562,29 +587,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo return true; } -static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, - u16 *outputs) -{ - u8 status; - - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); - status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); - - return (status == SDVO_CMD_STATUS_SUCCESS); -} - -static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, +static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, u16 outputs) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, sizeof(outputs)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, +static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, int mode) { u8 status, state = SDVO_ENCODER_STATE_ON; @@ -604,24 +618,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output break; } - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, +static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, int *clock_min, int *clock_max) { struct intel_sdvo_pixel_clock_range clocks; u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL, 0); - status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); + status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -633,92 +647,58 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou return true; } -static bool intel_sdvo_set_target_output(struct intel_output *intel_output, +static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, u16 outputs) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, sizeof(outputs)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == 
SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, +static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, struct intel_sdvo_dtd *dtd) { u8 status; - intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); - status = intel_sdvo_read_response(intel_output, &dtd->part1, - sizeof(dtd->part1)); + intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; - intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); - status = intel_sdvo_read_response(intel_output, &dtd->part2, - sizeof(dtd->part2)); + intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } -static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, +static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_get_timing(intel_output, - SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); -} - -static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, - struct intel_sdvo_dtd *dtd) -{ - return intel_sdvo_get_timing(intel_output, - SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); -} - -static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, - struct intel_sdvo_dtd *dtd) -{ - u8 status; - - intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); - status = intel_sdvo_read_response(intel_output, NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); - status = intel_sdvo_read_response(intel_output, NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - return true; -} - -static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, - struct intel_sdvo_dtd *dtd) -{ - return intel_sdvo_set_timing(intel_output, + return intel_sdvo_set_timing(intel_encoder, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); } -static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, +static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_set_timing(intel_output, + return intel_sdvo_set_timing(intel_encoder, SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); } static bool -intel_sdvo_create_preferred_input_timing(struct intel_output *output, +intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, uint16_t clock, uint16_t width, uint16_t height) { struct intel_sdvo_preferred_input_timing_args args; - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; uint8_t status; memset(&args, 0, sizeof(args)); @@ -732,32 +712,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) args.scaled = 1; - intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, + intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, &args, sizeof(args)); - status = intel_sdvo_read_response(output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } -static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, +static bool 
intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { bool status; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, NULL, 0); - status = intel_sdvo_read_response(output, &dtd->part1, + status = intel_sdvo_read_response(intel_encoder, &dtd->part1, sizeof(dtd->part1)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, NULL, 0); - status = intel_sdvo_read_response(output, &dtd->part2, + status = intel_sdvo_read_response(intel_encoder, &dtd->part2, sizeof(dtd->part2)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -765,29 +746,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, return false; } -static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) -{ - u8 response, status; - - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, 1); - - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); - return SDVO_CLOCK_RATE_MULT_1X; - } else { - DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response); - } - - return response; -} - -static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) +static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); - status = intel_sdvo_read_response(intel_output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -876,13 +840,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->flags |= DRM_MODE_FLAG_PVSYNC; } -static bool intel_sdvo_get_supp_encode(struct intel_output *output, +static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, struct intel_sdvo_encode *encode) { uint8_t status; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); - status = intel_sdvo_read_response(output, encode, sizeof(*encode)); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ memset(encode, 0, sizeof(*encode)); return false; @@ -891,29 +855,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output, return true; } -static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) +static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, + uint8_t mode) { uint8_t status; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); - status = intel_sdvo_read_response(output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_set_colorimetry(struct intel_output *output, +static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, uint8_t mode) { uint8_t status; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); - status = 
intel_sdvo_read_response(output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } #if 0 -static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) +static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) { int i, j; uint8_t set_buf_index[2]; @@ -922,43 +887,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) uint8_t buf[48]; uint8_t *pos; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); - intel_sdvo_read_response(output, &av_split, 1); + intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); + intel_sdvo_read_response(encoder, &av_split, 1); for (i = 0; i <= av_split; i++) { set_buf_index[0] = i; set_buf_index[1] = 0; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, + intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); - intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); - intel_sdvo_read_response(output, &buf_size, 1); + intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); + intel_sdvo_read_response(encoder, &buf_size, 1); pos = buf; for (j = 0; j <= buf_size; j += 8) { - intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, + intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, NULL, 0); - intel_sdvo_read_response(output, pos, 8); + intel_sdvo_read_response(encoder, pos, 8); pos += 8; } } } #endif -static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, - uint8_t *data, int8_t size, uint8_t tx_rate) +static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, + int index, + uint8_t *data, int8_t size, uint8_t tx_rate) { uint8_t set_buf_index[2]; set_buf_index[0] = index; set_buf_index[1] = 0; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2); for (; size > 0; size -= 8) { - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); data += 8; } - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); } static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) @@ -1033,7 +1000,7 @@ struct dip_infoframe { } __attribute__ ((packed)) u; } __attribute__((packed)); -static void intel_sdvo_set_avi_infoframe(struct intel_output *output, +static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, struct drm_display_mode * mode) { struct dip_infoframe avi_if = { @@ -1044,15 +1011,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output, avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 4 + avi_if.len); - intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, + intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, + 4 + avi_if.len, SDVO_HBUF_TX_VSYNC); } -static void intel_sdvo_set_tv_format(struct intel_output *output) +static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) { struct intel_sdvo_tv_format format; - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; uint32_t format_map, i; uint8_t status; @@ -1065,10 +1033,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 
sizeof(format) : sizeof(format_map)); - intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, sizeof(format)); - status = intel_sdvo_read_response(output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) DRM_DEBUG_KMS("%s: Failed to set TV format\n", SDVO_NAME(sdvo_priv)); @@ -1078,8 +1046,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_output *output = enc_to_intel_output(encoder); - struct intel_sdvo_priv *dev_priv = output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; if (dev_priv->is_tv) { struct intel_sdvo_dtd output_dtd; @@ -1094,22 +1062,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, /* Set output timings */ intel_sdvo_get_dtd_from_mode(&output_dtd, mode); - intel_sdvo_set_target_output(output, - dev_priv->controlled_output); - intel_sdvo_set_output_timing(output, &output_dtd); + intel_sdvo_set_target_output(intel_encoder, + dev_priv->attached_output); + intel_sdvo_set_output_timing(intel_encoder, &output_dtd); /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); - success = intel_sdvo_create_preferred_input_timing(output, + success = intel_sdvo_create_preferred_input_timing(intel_encoder, mode->clock / 10, mode->hdisplay, mode->vdisplay); if (success) { struct intel_sdvo_dtd input_dtd; - intel_sdvo_get_preferred_input_timing(output, + intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd); intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; @@ -1132,16 +1100,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, intel_sdvo_get_dtd_from_mode(&output_dtd, dev_priv->sdvo_lvds_fixed_mode); - intel_sdvo_set_target_output(output, - dev_priv->controlled_output); - intel_sdvo_set_output_timing(output, &output_dtd); + intel_sdvo_set_target_output(intel_encoder, + dev_priv->attached_output); + intel_sdvo_set_output_timing(intel_encoder, &output_dtd); /* Set the input timing to the screen. Assume always input 0. 
*/ - intel_sdvo_set_target_input(output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); success = intel_sdvo_create_preferred_input_timing( - output, + intel_encoder, mode->clock / 10, mode->hdisplay, mode->vdisplay); @@ -1149,7 +1117,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, if (success) { struct intel_sdvo_dtd input_dtd; - intel_sdvo_get_preferred_input_timing(output, + intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd); intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; @@ -1181,8 +1149,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_output *output = enc_to_intel_output(encoder); - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u32 sdvox = 0; int sdvo_pixel_multiply; struct intel_sdvo_in_out_map in_out; @@ -1198,15 +1166,15 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, * channel on the motherboard. In a two-input device, the first input * will be SDVOB and the second SDVOC. */ - in_out.in0 = sdvo_priv->controlled_output; + in_out.in0 = sdvo_priv->attached_output; in_out.in1 = 0; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); - status = intel_sdvo_read_response(output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (sdvo_priv->is_hdmi) { - intel_sdvo_set_avi_infoframe(output, mode); + intel_sdvo_set_avi_infoframe(intel_encoder, mode); sdvox |= SDVO_AUDIO_ENABLE; } @@ -1223,16 +1191,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, */ if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { /* Set the output timing to the screen */ - intel_sdvo_set_target_output(output, - sdvo_priv->controlled_output); - intel_sdvo_set_output_timing(output, &input_dtd); + intel_sdvo_set_target_output(intel_encoder, + sdvo_priv->attached_output); + intel_sdvo_set_output_timing(intel_encoder, &input_dtd); } /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); if (sdvo_priv->is_tv) - intel_sdvo_set_tv_format(output); + intel_sdvo_set_tv_format(intel_encoder); /* We would like to use intel_sdvo_create_preferred_input_timing() to * provide the device with a timing it can support, if it supports that @@ -1240,29 +1208,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, * output the preferred timing, and we don't support that currently. 
*/ #if 0 - success = intel_sdvo_create_preferred_input_timing(output, clock, + success = intel_sdvo_create_preferred_input_timing(encoder, clock, width, height); if (success) { struct intel_sdvo_dtd *input_dtd; - intel_sdvo_get_preferred_input_timing(output, &input_dtd); - intel_sdvo_set_input_timing(output, &input_dtd); + intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); + intel_sdvo_set_input_timing(encoder, &input_dtd); } #else - intel_sdvo_set_input_timing(output, &input_dtd); + intel_sdvo_set_input_timing(intel_encoder, &input_dtd); #endif switch (intel_sdvo_get_pixel_multiplier(mode)) { case 1: - intel_sdvo_set_clock_rate_mult(output, + intel_sdvo_set_clock_rate_mult(intel_encoder, SDVO_CLOCK_RATE_MULT_1X); break; case 2: - intel_sdvo_set_clock_rate_mult(output, + intel_sdvo_set_clock_rate_mult(intel_encoder, SDVO_CLOCK_RATE_MULT_2X); break; case 4: - intel_sdvo_set_clock_rate_mult(output, + intel_sdvo_set_clock_rate_mult(intel_encoder, SDVO_CLOCK_RATE_MULT_4X); break; } @@ -1273,8 +1241,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; } else { - sdvox |= I915_READ(sdvo_priv->output_device); - switch (sdvo_priv->output_device) { + sdvox |= I915_READ(sdvo_priv->sdvo_reg); + switch (sdvo_priv->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; @@ -1298,26 +1266,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) sdvox |= SDVO_STALL_SELECT; - intel_sdvo_write_sdvox(output, sdvox); + intel_sdvo_write_sdvox(intel_encoder, sdvox); } static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u32 temp; if (mode != DRM_MODE_DPMS_ON) { - intel_sdvo_set_active_outputs(intel_output, 0); + intel_sdvo_set_active_outputs(intel_encoder, 0); if (0) - intel_sdvo_set_encoder_power_state(intel_output, mode); + intel_sdvo_set_encoder_power_state(intel_encoder, mode); if (mode == DRM_MODE_DPMS_OFF) { - temp = I915_READ(sdvo_priv->output_device); + temp = I915_READ(sdvo_priv->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { - intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); + intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); } } } else { @@ -1325,13 +1293,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) int i; u8 status; - temp = I915_READ(sdvo_priv->output_device); + temp = I915_READ(sdvo_priv->sdvo_reg); if ((temp & SDVO_ENABLE) == 0) - intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); + intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) intel_wait_for_vblank(dev); - status = intel_sdvo_get_trained_inputs(intel_output, &input1, + status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); @@ -1345,109 +1313,18 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) } if (0) - intel_sdvo_set_encoder_power_state(intel_output, mode); - intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); + intel_sdvo_set_encoder_power_state(intel_encoder, mode); + intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); } return; } -static void intel_sdvo_save(struct 
drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; - int o; - - sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); - intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); - - if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { - intel_sdvo_set_target_input(intel_output, true, false); - intel_sdvo_get_input_timing(intel_output, - &sdvo_priv->save_input_dtd_1); - } - - if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { - intel_sdvo_set_target_input(intel_output, false, true); - intel_sdvo_get_input_timing(intel_output, - &sdvo_priv->save_input_dtd_2); - } - - for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) - { - u16 this_output = (1 << o); - if (sdvo_priv->caps.output_flags & this_output) - { - intel_sdvo_set_target_output(intel_output, this_output); - intel_sdvo_get_output_timing(intel_output, - &sdvo_priv->save_output_dtd[o]); - } - } - if (sdvo_priv->is_tv) { - /* XXX: Save TV format/enhancements. */ - } - - sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); -} - -static void intel_sdvo_restore(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; - int o; - int i; - bool input1, input2; - u8 status; - - intel_sdvo_set_active_outputs(intel_output, 0); - - for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) - { - u16 this_output = (1 << o); - if (sdvo_priv->caps.output_flags & this_output) { - intel_sdvo_set_target_output(intel_output, this_output); - intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); - } - } - - if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { - intel_sdvo_set_target_input(intel_output, true, false); - intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); - } - - if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { - intel_sdvo_set_target_input(intel_output, false, true); - intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); - } - - intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); - - if (sdvo_priv->is_tv) { - /* XXX: Restore TV format/enhancements. 
*/ - } - - intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); - - if (sdvo_priv->save_SDVOX & SDVO_ENABLE) - { - for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev); - status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); - if (status == SDVO_CMD_STATUS_SUCCESS && !input1) - DRM_DEBUG_KMS("First %s output reported failure to " - "sync\n", SDVO_NAME(sdvo_priv)); - } - - intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); -} - static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -1472,37 +1349,39 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, return MODE_OK; } -static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) +static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); - status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } +/* No use! */ +#if 0 struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) { struct drm_connector *connector = NULL; - struct intel_output *iout = NULL; + struct intel_encoder *iout = NULL; struct intel_sdvo_priv *sdvo; /* find the sdvo connector */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - iout = to_intel_output(connector); + iout = to_intel_encoder(connector); if (iout->type != INTEL_OUTPUT_SDVO) continue; sdvo = iout->dev_priv; - if (sdvo->output_device == SDVOB && sdvoB) + if (sdvo->sdvo_reg == SDVOB && sdvoB) return connector; - if (sdvo->output_device == SDVOC && !sdvoB) + if (sdvo->sdvo_reg == SDVOC && !sdvoB) return connector; } @@ -1514,16 +1393,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) { u8 response[2]; u8 status; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; DRM_DEBUG_KMS("\n"); if (!connector) return 0; - intel_output = to_intel_output(connector); + intel_encoder = to_intel_encoder(connector); - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (response[0] !=0) return 1; @@ -1535,30 +1414,31 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) { u8 response[2]; u8 status; - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); + intel_sdvo_read_response(intel_encoder, &response, 2); if (on) 
{ - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &response, 2); - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } else { response[0] = 0; response[1] = 0; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); + intel_sdvo_read_response(intel_encoder, &response, 2); } +#endif static bool -intel_sdvo_multifunc_encoder(struct intel_output *intel_output) +intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int caps = 0; if (sdvo_priv->caps.output_flags & @@ -1592,12 +1472,17 @@ static struct drm_connector * intel_find_analog_connector(struct drm_device *dev) { struct drm_connector *connector; - struct intel_output *intel_output; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - intel_output = to_intel_output(connector); - if (intel_output->type == INTEL_OUTPUT_ANALOG) - return connector; + struct drm_encoder *encoder; + struct intel_encoder *intel_encoder; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + intel_encoder = enc_to_intel_encoder(encoder); + if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (encoder == intel_attached_encoder(connector)) + return connector; + } + } } return NULL; } @@ -1619,18 +1504,20 @@ intel_analog_is_connected(struct drm_device *dev) } enum drm_connector_status -intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) +intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; enum drm_connector_status status = connector_status_connected; struct edid *edid = NULL; - edid = drm_get_edid(&intel_output->base, - intel_output->ddc_bus); + edid = drm_get_edid(connector, intel_encoder->ddc_bus); /* This is only applied to SDVO cards with multiple outputs */ - if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { + if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { uint8_t saved_ddc, temp_ddc; saved_ddc = sdvo_priv->ddc_bus; temp_ddc = sdvo_priv->ddc_bus >> 1; @@ -1640,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) */ while(temp_ddc > 1) { sdvo_priv->ddc_bus = temp_ddc; - edid = drm_get_edid(&intel_output->base, - intel_output->ddc_bus); + edid = drm_get_edid(connector, 
intel_encoder->ddc_bus); if (edid) { /* * When we can get the EDID, maybe it is the @@ -1658,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) /* when there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector */ - if (edid == NULL && - sdvo_priv->analog_ddc_bus && - !intel_analog_is_connected(intel_output->base.dev)) - edid = drm_get_edid(&intel_output->base, - sdvo_priv->analog_ddc_bus); + if (edid == NULL && sdvo_priv->analog_ddc_bus && + !intel_analog_is_connected(connector->dev)) + edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus); + if (edid != NULL) { - /* Don't report the output as connected if it's a DVI-I - * connector with a non-digital EDID coming out. - */ - if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { - if (edid->input & DRM_EDID_INPUT_DIGITAL) - sdvo_priv->is_hdmi = - drm_detect_hdmi_monitor(edid); - else - status = connector_status_disconnected; - } + bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); + bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK); - kfree(edid); - intel_output->base.display_info.raw_edid = NULL; + /* DDC bus is shared, match EDID to connector type */ + if (is_digital && need_digital) + sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); + else if (is_digital != need_digital) + status = connector_status_disconnected; - } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) + connector->display_info.raw_edid = NULL; + } else status = connector_status_disconnected; + + kfree(edid); return status; } @@ -1688,16 +1571,20 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect { uint16_t response; u8 status; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; - - intel_sdvo_write_cmd(intel_output, + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + enum drm_connector_status ret; + + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); if (sdvo_priv->is_tv) { /* add 30ms delay when the output type is SDVO-TV */ mdelay(30); } - status = intel_sdvo_read_response(intel_output, &response, 2); + status = intel_sdvo_read_response(intel_encoder, &response, 2); DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); @@ -1707,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect if (response == 0) return connector_status_disconnected; - if (intel_sdvo_multifunc_encoder(intel_output) && - sdvo_priv->attached_output != response) { - if (sdvo_priv->controlled_output != response && - intel_sdvo_output_setup(intel_output, response) != true) - return connector_status_unknown; - sdvo_priv->attached_output = response; + sdvo_priv->attached_output = response; + + if ((sdvo_connector->output_flag & response) == 0) + ret = connector_status_disconnected; + else if (response & SDVO_TMDS_MASK) + ret = intel_sdvo_hdmi_sink_detect(connector); + else + ret = connector_status_connected; + + /* May update encoder flag for like clock for SDVO TV, etc.*/ + if (ret == connector_status_connected) { + sdvo_priv->is_tv = false; + sdvo_priv->is_lvds = false; 
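+		/*
+		 * The type flags are re-derived from the attached-display
+		 * mask on every probe: a multifunction encoder may have a
+		 * different display type plugged in each time.
+		 */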
+ intel_encoder->needs_tv_clock = false; + + if (response & SDVO_TV_MASK) { + sdvo_priv->is_tv = true; + intel_encoder->needs_tv_clock = true; + } + if (response & SDVO_LVDS_MASK) + sdvo_priv->is_lvds = true; } - return intel_sdvo_hdmi_sink_detect(connector, response); + + return ret; } static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int num_modes; /* set the bus switch and get the modes */ - num_modes = intel_ddc_get_modes(intel_output); + num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC @@ -1734,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) */ if (num_modes == 0 && sdvo_priv->analog_ddc_bus && - !intel_analog_is_connected(intel_output->base.dev)) { - struct i2c_adapter *digital_ddc_bus; - + !intel_analog_is_connected(connector->dev)) { /* Switch to the analog ddc bus and try that */ - digital_ddc_bus = intel_output->ddc_bus; - intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; - - (void) intel_ddc_get_modes(intel_output); - - intel_output->ddc_bus = digital_ddc_bus; + (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); } } @@ -1815,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = { static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { - struct intel_output *output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map = 0; int i; @@ -1836,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) sizeof(format_map) ? sizeof(format_map) : sizeof(struct intel_sdvo_sdtv_resolution_request)); - intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); + intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); - intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res)); - status = intel_sdvo_read_response(output, &reply, 3); + status = intel_sdvo_read_response(intel_encoder, &reply, 3); if (status != SDVO_CMD_STATUS_SUCCESS) return; @@ -1857,9 +1755,10 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; struct drm_display_mode *newmode; /* @@ -1867,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. 
*/ - intel_ddc_get_modes(intel_output); + intel_ddc_get_modes(connector, intel_encoder->ddc_bus); if (list_empty(&connector->probed_modes) == false) goto end; @@ -1896,12 +1795,12 @@ end: static int intel_sdvo_get_modes(struct drm_connector *connector) { - struct intel_output *output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - if (sdvo_priv->is_tv) + if (IS_TV(sdvo_connector)) intel_sdvo_get_tv_modes(connector); - else if (sdvo_priv->is_lvds == true) + else if (IS_LVDS(sdvo_connector)) intel_sdvo_get_lvds_modes(connector); else intel_sdvo_get_ddc_modes(connector); @@ -1914,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) static void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; struct drm_device *dev = connector->dev; - if (sdvo_priv->is_tv) { + if (IS_TV(sdvo_priv)) { if (sdvo_priv->left_property) drm_property_destroy(dev, sdvo_priv->left_property); if (sdvo_priv->right_property) @@ -1931,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) drm_property_destroy(dev, sdvo_priv->hpos_property); if (sdvo_priv->vpos_property) drm_property_destroy(dev, sdvo_priv->vpos_property); - } - if (sdvo_priv->is_tv) { if (sdvo_priv->saturation_property) drm_property_destroy(dev, sdvo_priv->saturation_property); @@ -1942,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) if (sdvo_priv->hue_property) drm_property_destroy(dev, sdvo_priv->hue_property); } - if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { + if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { if (sdvo_priv->brightness_property) drm_property_destroy(dev, sdvo_priv->brightness_property); @@ -1952,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) static void intel_sdvo_destroy(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - if (intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); - if (sdvo_priv->analog_ddc_bus) - intel_i2c_destroy(sdvo_priv->analog_ddc_bus); - - if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) - drm_mode_destroy(connector->dev, - sdvo_priv->sdvo_lvds_fixed_mode); - - if (sdvo_priv->tv_format_property) + if (sdvo_connector->tv_format_property) drm_property_destroy(connector->dev, - sdvo_priv->tv_format_property); - - if (sdvo_priv->is_tv || sdvo_priv->is_lvds) - intel_sdvo_destroy_enhance_property(connector); + sdvo_connector->tv_format_property); + intel_sdvo_destroy_enhance_property(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - - kfree(intel_output); + kfree(connector); } static int @@ -1984,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { - struct intel_output 
*intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; struct drm_crtc *crtc = encoder->crtc; int ret = 0; bool changed = false; @@ -1997,105 +1882,105 @@ intel_sdvo_set_property(struct drm_connector *connector, if (ret < 0) goto out; - if (property == sdvo_priv->tv_format_property) { + if (property == sdvo_connector->tv_format_property) { if (val >= TV_FORMAT_NUM) { ret = -EINVAL; goto out; } if (sdvo_priv->tv_format_name == - sdvo_priv->tv_format_supported[val]) + sdvo_connector->tv_format_supported[val]) goto out; - sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; + sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; changed = true; } - if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { + if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { cmd = 0; temp_value = val; - if (sdvo_priv->left_property == property) { + if (sdvo_connector->left_property == property) { drm_connector_property_set_value(connector, - sdvo_priv->right_property, val); - if (sdvo_priv->left_margin == temp_value) + sdvo_connector->right_property, val); + if (sdvo_connector->left_margin == temp_value) goto out; - sdvo_priv->left_margin = temp_value; - sdvo_priv->right_margin = temp_value; - temp_value = sdvo_priv->max_hscan - - sdvo_priv->left_margin; + sdvo_connector->left_margin = temp_value; + sdvo_connector->right_margin = temp_value; + temp_value = sdvo_connector->max_hscan - + sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; - } else if (sdvo_priv->right_property == property) { + } else if (sdvo_connector->right_property == property) { drm_connector_property_set_value(connector, - sdvo_priv->left_property, val); - if (sdvo_priv->right_margin == temp_value) + sdvo_connector->left_property, val); + if (sdvo_connector->right_margin == temp_value) goto out; - sdvo_priv->left_margin = temp_value; - sdvo_priv->right_margin = temp_value; - temp_value = sdvo_priv->max_hscan - - sdvo_priv->left_margin; + sdvo_connector->left_margin = temp_value; + sdvo_connector->right_margin = temp_value; + temp_value = sdvo_connector->max_hscan - + sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; - } else if (sdvo_priv->top_property == property) { + } else if (sdvo_connector->top_property == property) { drm_connector_property_set_value(connector, - sdvo_priv->bottom_property, val); - if (sdvo_priv->top_margin == temp_value) + sdvo_connector->bottom_property, val); + if (sdvo_connector->top_margin == temp_value) goto out; - sdvo_priv->top_margin = temp_value; - sdvo_priv->bottom_margin = temp_value; - temp_value = sdvo_priv->max_vscan - - sdvo_priv->top_margin; + sdvo_connector->top_margin = temp_value; + sdvo_connector->bottom_margin = temp_value; + temp_value = sdvo_connector->max_vscan - + sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; - } else if (sdvo_priv->bottom_property == property) { + } else if (sdvo_connector->bottom_property == property) { drm_connector_property_set_value(connector, - sdvo_priv->top_property, val); - if (sdvo_priv->bottom_margin == temp_value) + sdvo_connector->top_property, val); + if 
(sdvo_connector->bottom_margin == temp_value) goto out; - sdvo_priv->top_margin = temp_value; - sdvo_priv->bottom_margin = temp_value; - temp_value = sdvo_priv->max_vscan - - sdvo_priv->top_margin; + sdvo_connector->top_margin = temp_value; + sdvo_connector->bottom_margin = temp_value; + temp_value = sdvo_connector->max_vscan - + sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; - } else if (sdvo_priv->hpos_property == property) { - if (sdvo_priv->cur_hpos == temp_value) + } else if (sdvo_connector->hpos_property == property) { + if (sdvo_connector->cur_hpos == temp_value) goto out; cmd = SDVO_CMD_SET_POSITION_H; - sdvo_priv->cur_hpos = temp_value; - } else if (sdvo_priv->vpos_property == property) { - if (sdvo_priv->cur_vpos == temp_value) + sdvo_connector->cur_hpos = temp_value; + } else if (sdvo_connector->vpos_property == property) { + if (sdvo_connector->cur_vpos == temp_value) goto out; cmd = SDVO_CMD_SET_POSITION_V; - sdvo_priv->cur_vpos = temp_value; - } else if (sdvo_priv->saturation_property == property) { - if (sdvo_priv->cur_saturation == temp_value) + sdvo_connector->cur_vpos = temp_value; + } else if (sdvo_connector->saturation_property == property) { + if (sdvo_connector->cur_saturation == temp_value) goto out; cmd = SDVO_CMD_SET_SATURATION; - sdvo_priv->cur_saturation = temp_value; - } else if (sdvo_priv->contrast_property == property) { - if (sdvo_priv->cur_contrast == temp_value) + sdvo_connector->cur_saturation = temp_value; + } else if (sdvo_connector->contrast_property == property) { + if (sdvo_connector->cur_contrast == temp_value) goto out; cmd = SDVO_CMD_SET_CONTRAST; - sdvo_priv->cur_contrast = temp_value; - } else if (sdvo_priv->hue_property == property) { - if (sdvo_priv->cur_hue == temp_value) + sdvo_connector->cur_contrast = temp_value; + } else if (sdvo_connector->hue_property == property) { + if (sdvo_connector->cur_hue == temp_value) goto out; cmd = SDVO_CMD_SET_HUE; - sdvo_priv->cur_hue = temp_value; - } else if (sdvo_priv->brightness_property == property) { - if (sdvo_priv->cur_brightness == temp_value) + sdvo_connector->cur_hue = temp_value; + } else if (sdvo_connector->brightness_property == property) { + if (sdvo_connector->cur_brightness == temp_value) goto out; cmd = SDVO_CMD_SET_BRIGHTNESS; - sdvo_priv->cur_brightness = temp_value; + sdvo_connector->cur_brightness = temp_value; } if (cmd) { - intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); - status = intel_sdvo_read_response(intel_output, + intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO command \n"); @@ -2121,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = intel_sdvo_save, - .restore = intel_sdvo_restore, .detect = intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, @@ -2132,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, - .best_encoder = intel_best_encoder, + .best_encoder = intel_attached_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 
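+	/*
+	 * Bus teardown moved here from intel_sdvo_destroy(): in the new
+	 * design the i2c, DDC and analog DDC buses (and the fixed LVDS
+	 * mode) belong to the encoder, not the connector.
+	 */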
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); + if (sdvo_priv->analog_ddc_bus) + intel_i2c_destroy(sdvo_priv->analog_ddc_bus); + + if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) + drm_mode_destroy(encoder->dev, + sdvo_priv->sdvo_lvds_fixed_mode); + drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { @@ -2153,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { * outputs, then LVDS outputs. */ static void -intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) +intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, + struct intel_sdvo_priv *sdvo, u32 reg) { - uint16_t mask = 0; - unsigned int num_bits; + struct sdvo_device_mapping *mapping; - /* Make a mask of outputs less than or equal to our own priority in the - * list. - */ - switch (dev_priv->controlled_output) { - case SDVO_OUTPUT_LVDS1: - mask |= SDVO_OUTPUT_LVDS1; - case SDVO_OUTPUT_LVDS0: - mask |= SDVO_OUTPUT_LVDS0; - case SDVO_OUTPUT_TMDS1: - mask |= SDVO_OUTPUT_TMDS1; - case SDVO_OUTPUT_TMDS0: - mask |= SDVO_OUTPUT_TMDS0; - case SDVO_OUTPUT_RGB1: - mask |= SDVO_OUTPUT_RGB1; - case SDVO_OUTPUT_RGB0: - mask |= SDVO_OUTPUT_RGB0; - break; - } - - /* Count bits to find what number we are in the priority list. */ - mask &= dev_priv->caps.output_flags; - num_bits = hweight16(mask); - if (num_bits > 3) { - /* if more than 3 outputs, default to DDC bus 3 for now */ - num_bits = 3; - } + if (IS_SDVOB(reg)) + mapping = &(dev_priv->sdvo_mappings[0]); + else + mapping = &(dev_priv->sdvo_mappings[1]); - /* Corresponds to SDVO_CONTROL_BUS_DDCx */ - dev_priv->ddc_bus = 1 << num_bits; + sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); } static bool -intel_sdvo_get_digital_encoding_mode(struct intel_output *output) +intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) { struct intel_sdvo_priv *sdvo_priv = output->dev_priv; uint8_t status; - intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); + if (device == 0) + intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0); + else + intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1); intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); @@ -2204,42 +2082,40 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output) return true; } -static struct intel_output * -intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) +static struct intel_encoder * +intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) { struct drm_device *dev = chan->drm_dev; - struct drm_connector *connector; - struct intel_output *intel_output = NULL; + struct drm_encoder *encoder; + struct intel_encoder *intel_encoder = NULL; - list_for_each_entry(connector, - &dev->mode_config.connector_list, head) { - if (to_intel_output(connector)->ddc_bus == &chan->adapter) { - intel_output = to_intel_output(connector); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + intel_encoder = enc_to_intel_encoder(encoder); + if (intel_encoder->ddc_bus == &chan->adapter) break; - } } - return intel_output; + return intel_encoder; } static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct 
intel_sdvo_priv *sdvo_priv; struct i2c_algo_bit_data *algo_data; const struct i2c_algorithm *algo; algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; - intel_output = - intel_sdvo_chan_to_intel_output( + intel_encoder = + intel_sdvo_chan_to_intel_encoder( (struct intel_i2c_chan *)(algo_data->data)); - if (intel_output == NULL) + if (intel_encoder == NULL) return -EINVAL; - sdvo_priv = intel_output->dev_priv; - algo = intel_output->i2c_bus->algo; + sdvo_priv = intel_encoder->dev_priv; + algo = intel_encoder->i2c_bus->algo; - intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); + intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); return algo->master_xfer(i2c_adap, msgs, num); } @@ -2248,12 +2124,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { }; static u8 -intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) +intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct sdvo_device_mapping *my_mapping, *other_mapping; - if (output_device == SDVOB) { + if (IS_SDVOB(sdvo_reg)) { my_mapping = &dev_priv->sdvo_mappings[0]; other_mapping = &dev_priv->sdvo_mappings[1]; } else { @@ -2278,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) /* No SDVO device info is found for another DVO port, * so use mapping assumption we had before BIOS parsing. */ - if (output_device == SDVOB) + if (IS_SDVOB(sdvo_reg)) return 0x70; else return 0x72; } -static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id) +static bool +intel_sdvo_connector_alloc (struct intel_connector **ret) { - DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident); - return 1; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *sdvo_connector; + + *ret = kzalloc(sizeof(*intel_connector) + + sizeof(*sdvo_connector), GFP_KERNEL); + if (!*ret) + return false; + + intel_connector = *ret; + sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1); + intel_connector->dev_priv = sdvo_connector; + + return true; } -static struct dmi_system_id intel_sdvo_bad_tv[] = { - { - .callback = intel_sdvo_bad_tv_callback, - .ident = "IntelG45/ICH10R/DME1737", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"), - DMI_MATCH(DMI_PRODUCT_NAME, "4800784"), - }, - }, +static void +intel_sdvo_connector_create (struct drm_encoder *encoder, + struct drm_connector *connector) +{ + drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, + connector->connector_type); - { } /* terminating entry */ -}; + drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + + drm_mode_connector_attach_encoder(connector, encoder); + drm_sysfs_connector_add(connector); +} static bool -intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) +intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) { - struct drm_connector *connector = &intel_output->base; - struct drm_encoder *encoder = &intel_output->enc; - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; - bool ret = true, registered = false; + struct drm_encoder *encoder = &intel_encoder->enc; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *sdvo_connector; + + 
if (!intel_sdvo_connector_alloc(&intel_connector)) + return false; + + sdvo_connector = intel_connector->dev_priv; + + if (device == 0) { + sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; + sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; + } else if (device == 1) { + sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; + sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; + } + + connector = &intel_connector->base; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + encoder->encoder_type = DRM_MODE_ENCODER_TMDS; + connector->connector_type = DRM_MODE_CONNECTOR_DVID; + + if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) + && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) + && sdvo_priv->is_hdmi) { + /* enable hdmi encoding mode if supported */ + intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); + intel_sdvo_set_colorimetry(intel_encoder, + SDVO_COLORIMETRY_RGB256); + connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + } + intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); + + intel_sdvo_connector_create(encoder, connector); + + return true; +} + +static bool +intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) +{ + struct drm_encoder *encoder = &intel_encoder->enc; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *sdvo_connector; + + if (!intel_sdvo_connector_alloc(&intel_connector)) + return false; + + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + sdvo_connector = intel_connector->dev_priv; + + sdvo_priv->controlled_output |= type; + sdvo_connector->output_flag = type; + + sdvo_priv->is_tv = true; + intel_encoder->needs_tv_clock = true; + intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + + intel_sdvo_connector_create(encoder, connector); + + intel_sdvo_tv_create_property(connector, type); + + intel_sdvo_create_enhance_property(connector); + + return true; +} + +static bool +intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) +{ + struct drm_encoder *encoder = &intel_encoder->enc; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *sdvo_connector; + + if (!intel_sdvo_connector_alloc(&intel_connector)) + return false; + + connector = &intel_connector->base; + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; + sdvo_connector = intel_connector->dev_priv; + + if (device == 0) { + sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; + sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; + } else if (device == 1) { + sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1; + sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; + } + + intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); + + intel_sdvo_connector_create(encoder, connector); + return true; +} + +static bool +intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) +{ + struct drm_encoder *encoder = &intel_encoder->enc; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *sdvo_connector; 
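/*
 * The reworked intel_sdvo_output_setup() that follows probes each output
 * pair with a mask-equality test: the XXX0 function is checked with a
 * plain bit test, while XXX1 is only initialised when both bits of the
 * pair are set, since an SDVO device is not expected to expose XXX1
 * without XXX0.  A compilable sketch of that dispatch, with made-up bit
 * values standing in for the real SDVO_OUTPUT_* masks:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OUT_TMDS0 (1u << 0)	/* hypothetical stand-in for SDVO_OUTPUT_TMDS0 */
#define OUT_TMDS1 (1u << 1)	/* hypothetical stand-in for SDVO_OUTPUT_TMDS1 */
#define TMDS_MASK (OUT_TMDS0 | OUT_TMDS1)

static bool dvi_init(int device)
{
	printf("probing DVI function block %d\n", device);
	return true;
}

int main(void)
{
	uint16_t flags = OUT_TMDS0 | OUT_TMDS1;	/* as reported by the device */

	/* XXX0 may exist on its own, so a single-bit test is enough */
	if (flags & OUT_TMDS0)
		if (!dvi_init(0))
			return 1;

	/* XXX1 is only probed when the whole pair is present, hence the
	 * mask-equality test instead of a plain (flags & OUT_TMDS1) */
	if ((flags & TMDS_MASK) == TMDS_MASK)
		if (!dvi_init(1))
			return 1;

	return 0;
}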
+ + if (!intel_sdvo_connector_alloc(&intel_connector)) + return false; + + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + sdvo_connector = intel_connector->dev_priv; + + sdvo_priv->is_lvds = true; + + if (device == 0) { + sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; + sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; + } else if (device == 1) { + sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; + sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; + } + + intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT); + + intel_sdvo_connector_create(encoder, connector); + intel_sdvo_create_enhance_property(connector); + return true; +} + +static bool +intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) +{ + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; sdvo_priv->is_tv = false; - intel_output->needs_tv_clock = false; + intel_encoder->needs_tv_clock = false; sdvo_priv->is_lvds = false; - if (device_is_registered(&connector->kdev)) { - drm_sysfs_connector_remove(connector); - registered = true; - } + /* Per the SDVO spec, an XXX1 output function may not exist unless the device also has the XXX0 function. */ - if (flags & - (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { - if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) - sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; - else - sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; - - encoder->encoder_type = DRM_MODE_ENCODER_TMDS; - connector->connector_type = DRM_MODE_CONNECTOR_DVID; - - if (intel_sdvo_get_supp_encode(intel_output, - &sdvo_priv->encode) && - intel_sdvo_get_digital_encoding_mode(intel_output) && - sdvo_priv->is_hdmi) { - /* enable hdmi encoding mode if supported */ - intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); - intel_sdvo_set_colorimetry(intel_output, - SDVO_COLORIMETRY_RGB256); - connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; - intel_output->clone_mask = - (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT); - } - } else if ((flags & SDVO_OUTPUT_SVID0) && - !dmi_check_system(intel_sdvo_bad_tv)) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; - encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; - connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; - } else if (flags & SDVO_OUTPUT_RGB0) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; - encoder->encoder_type = DRM_MODE_ENCODER_DAC; - connector->connector_type = DRM_MODE_CONNECTOR_VGA; - intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT); - } else if (flags & SDVO_OUTPUT_RGB1) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; - encoder->encoder_type = DRM_MODE_ENCODER_DAC; - connector->connector_type = DRM_MODE_CONNECTOR_VGA; - intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT); - } else if (flags & SDVO_OUTPUT_CVBS0) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; - encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; - connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; - } else if (flags & SDVO_OUTPUT_LVDS0) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; - encoder->encoder_type = DRM_MODE_ENCODER_LVDS; - connector->connector_type =
DRM_MODE_CONNECTOR_LVDS; - sdvo_priv->is_lvds = true; - intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | - (1 << INTEL_SDVO_LVDS_CLONE_BIT); - } else if (flags & SDVO_OUTPUT_LVDS1) { - - sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; - encoder->encoder_type = DRM_MODE_ENCODER_LVDS; - connector->connector_type = DRM_MODE_CONNECTOR_LVDS; - sdvo_priv->is_lvds = true; - intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | - (1 << INTEL_SDVO_LVDS_CLONE_BIT); - } else { + if (flags & SDVO_OUTPUT_TMDS0) + if (!intel_sdvo_dvi_init(intel_encoder, 0)) + return false; + if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) + if (!intel_sdvo_dvi_init(intel_encoder, 1)) + return false; + + /* TV has no XXX1 function block */ + if (flags & SDVO_OUTPUT_SVID0) + if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) + return false; + + if (flags & SDVO_OUTPUT_CVBS0) + if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) + return false; + + if (flags & SDVO_OUTPUT_RGB0) + if (!intel_sdvo_analog_init(intel_encoder, 0)) + return false; + + if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) + if (!intel_sdvo_analog_init(intel_encoder, 1)) + return false; + + if (flags & SDVO_OUTPUT_LVDS0) + if (!intel_sdvo_lvds_init(intel_encoder, 0)) + return false; + + if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) + if (!intel_sdvo_lvds_init(intel_encoder, 1)) + return false; + + if ((flags & SDVO_OUTPUT_MASK) == 0) { unsigned char bytes[2]; sdvo_priv->controlled_output = 0; @@ -2399,32 +2392,29 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(sdvo_priv), bytes[0], bytes[1]); - ret = false; + return false; } - intel_output->crtc_mask = (1 << 0) | (1 << 1); - - if (ret && registered) - ret = drm_sysfs_connector_add(connector) == 0 ? 
true : false; - - - return ret; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + return true; } -static void intel_sdvo_tv_create_property(struct drm_connector *connector) +static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; struct intel_sdvo_tv_format format; uint32_t format_map, i; uint8_t status; - intel_sdvo_set_target_output(intel_output, - sdvo_priv->controlled_output); + intel_sdvo_set_target_output(intel_encoder, type); - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &format, sizeof(format)); if (status != SDVO_CMD_STATUS_SUCCESS) return; @@ -2435,43 +2425,45 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) if (format_map == 0) return; - sdvo_priv->format_supported_num = 0; + sdvo_connector->format_supported_num = 0; for (i = 0 ; i < TV_FORMAT_NUM; i++) if (format_map & (1 << i)) { - sdvo_priv->tv_format_supported - [sdvo_priv->format_supported_num++] = + sdvo_connector->tv_format_supported + [sdvo_connector->format_supported_num++] = tv_format_names[i]; } - sdvo_priv->tv_format_property = + sdvo_connector->tv_format_property = drm_property_create( connector->dev, DRM_MODE_PROP_ENUM, - "mode", sdvo_priv->format_supported_num); + "mode", sdvo_connector->format_supported_num); - for (i = 0; i < sdvo_priv->format_supported_num; i++) + for (i = 0; i < sdvo_connector->format_supported_num; i++) drm_property_add_enum( - sdvo_priv->tv_format_property, i, - i, sdvo_priv->tv_format_supported[i]); + sdvo_connector->tv_format_property, i, + i, sdvo_connector->tv_format_supported[i]); - sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0]; + sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; drm_connector_attach_property( - connector, sdvo_priv->tv_format_property, 0); + connector, sdvo_connector->tv_format_property, 0); } static void intel_sdvo_create_enhance_property(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; struct intel_sdvo_enhancements_reply sdvo_data; struct drm_device *dev = connector->dev; uint8_t status; uint16_t response, data_value[2]; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, NULL, 0); - status = intel_sdvo_read_response(intel_output, &sdvo_data, + status = intel_sdvo_read_response(intel_encoder, &sdvo_data, sizeof(sdvo_data)); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS(" incorrect response is returned\n"); @@ -2482,23 +2474,23 @@ static 
void intel_sdvo_create_enhance_property(struct drm_connector *connector) DRM_DEBUG_KMS("No enhancement is supported\n"); return; } - if (sdvo_priv->is_tv) { + if (IS_TV(sdvo_priv)) { /* when horizontal overscan is supported, Add the left/right * property */ if (sdvo_data.overscan_h) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO max " "h_overscan\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_OVERSCAN_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); @@ -2528,18 +2520,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.overscan_v) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO max " "v_overscan\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_OVERSCAN_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); @@ -2569,17 +2561,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.position_h) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_POSITION_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); @@ -2600,17 +2592,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.position_v) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_POSITION_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); @@ -2630,20 +2622,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) "default %d, current %d\n", data_value[0], 
data_value[1], response); } - } - if (sdvo_priv->is_tv) { if (sdvo_data.saturation) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_SATURATION, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SATURATION, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); @@ -2665,17 +2655,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.contrast) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CONTRAST, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); @@ -2696,17 +2686,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.hue) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_HUE, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HUE, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); @@ -2727,19 +2717,19 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } } - if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { + if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { if (sdvo_data.brightness) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_BRIGHTNESS, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); @@ -2764,109 +2754,98 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) return; } -bool intel_sdvo_init(struct drm_device *dev, int output_device) +bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) { struct drm_i915_private *dev_priv = dev->dev_private; - struct 
drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct intel_sdvo_priv *sdvo_priv; - u8 ch[0x40]; int i; + u32 i2c_reg, ddc_reg, analog_ddc_reg; - intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); - if (!intel_output) { + intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); + if (!intel_encoder) { return false; } - sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); - sdvo_priv->output_device = output_device; + sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); + sdvo_priv->sdvo_reg = sdvo_reg; + + intel_encoder->dev_priv = sdvo_priv; + intel_encoder->type = INTEL_OUTPUT_SDVO; - intel_output->dev_priv = sdvo_priv; - intel_output->type = INTEL_OUTPUT_SDVO; + if (HAS_PCH_SPLIT(dev)) { + i2c_reg = PCH_GPIOE; + ddc_reg = PCH_GPIOE; + analog_ddc_reg = PCH_GPIOA; + } else { + i2c_reg = GPIOE; + ddc_reg = GPIOE; + analog_ddc_reg = GPIOA; + } /* setup the DDC bus. */ - if (output_device == SDVOB) - intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); + if (IS_SDVOB(sdvo_reg)) + intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); else - intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); + intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); - if (!intel_output->i2c_bus) + if (!intel_encoder->i2c_bus) goto err_inteloutput; - sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); + sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); /* Save the bit-banging i2c functionality for use by the DDC wrapper */ - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; + intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { - if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { + if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", - output_device == SDVOB ? 'B' : 'C'); + IS_SDVOB(sdvo_reg) ? 'B' : 'C'); goto err_i2c; } } /* setup the DDC bus. 
*/ - if (output_device == SDVOB) { - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); - sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, + if (IS_SDVOB(sdvo_reg)) { + intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); + sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, "SDVOB/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; } else { - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); - sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, + intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); + sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, "SDVOC/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; } - if (intel_output->ddc_bus == NULL) + if (intel_encoder->ddc_bus == NULL) goto err_i2c; /* Wrap with our custom algo which switches to DDC mode */ - intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; + intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; + + /* encoder type will be decided later */ + drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); + drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ - intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); + intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); - if (intel_sdvo_output_setup(intel_output, + if (intel_sdvo_output_setup(intel_encoder, sdvo_priv->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", - output_device == SDVOB ? 'B' : 'C'); + IS_SDVOB(sdvo_reg) ? 'B' : 'C'); goto err_i2c; } - - connector = &intel_output->base; - drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, - connector->connector_type); - - drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; - - drm_encoder_init(dev, &intel_output->enc, - &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); - - drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); - - drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); - if (sdvo_priv->is_tv) - intel_sdvo_tv_create_property(connector); - - if (sdvo_priv->is_tv || sdvo_priv->is_lvds) - intel_sdvo_create_enhance_property(connector); - - drm_sysfs_connector_add(connector); - - intel_sdvo_select_ddc_bus(sdvo_priv); + intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. 
*/ - intel_sdvo_set_target_input(intel_output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); - intel_sdvo_get_input_pixel_clock_range(intel_output, + intel_sdvo_get_input_pixel_clock_range(intel_encoder, &sdvo_priv->pixel_clock_min, &sdvo_priv->pixel_clock_max); @@ -2893,12 +2872,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) err_i2c: if (sdvo_priv->analog_ddc_bus != NULL) intel_i2c_destroy(sdvo_priv->analog_ddc_bus); - if (intel_output->ddc_bus != NULL) - intel_i2c_destroy(intel_output->ddc_bus); - if (intel_output->i2c_bus != NULL) - intel_i2c_destroy(intel_output->i2c_bus); + if (intel_encoder->ddc_bus != NULL) + intel_i2c_destroy(intel_encoder->ddc_bus); + if (intel_encoder->i2c_bus != NULL) + intel_i2c_destroy(intel_encoder->i2c_bus); err_inteloutput: - kfree(intel_output); + kfree(intel_encoder); return false; } diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b741..d2d4e4045ca9 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) } } -static void -intel_tv_save(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - int i; - - tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); - tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2); - tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3); - tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1); - tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2); - tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3); - tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4); - tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5); - tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6); - tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7); - tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1); - tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2); - tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3); - - tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y); - tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2); - tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U); - tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2); - tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V); - tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2); - tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS); - tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL); - tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS); - tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE); - tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1); - tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2); - tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3); - - for (i = 0; i < 60; i++) - tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2)); - for (i = 0; i < 60; i++) - tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2)); - for (i = 0; i < 43; i++) - tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2)); - for (i = 0; i < 43; i++) - tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2)); - - tv_priv->save_TV_DAC = I915_READ(TV_DAC); - tv_priv->save_TV_CTL = I915_READ(TV_CTL); -} - -static void -intel_tv_restore(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct 
intel_tv_priv *tv_priv = intel_output->dev_priv; - struct drm_crtc *crtc = connector->encoder->crtc; - struct intel_crtc *intel_crtc; - int i; - - /* FIXME: No CRTC? */ - if (!crtc) - return; - - intel_crtc = to_intel_crtc(crtc); - I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1); - I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2); - I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3); - I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1); - I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2); - I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3); - I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4); - I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5); - I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6); - I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7); - I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1); - I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2); - I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3); - - I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y); - I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2); - I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U); - I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2); - I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V); - I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2); - I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS); - I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL); - - { - int pipeconf_reg = (intel_crtc->pipe == 0) ? - PIPEACONF : PIPEBCONF; - int dspcntr_reg = (intel_crtc->plane == 0) ? - DSPACNTR : DSPBCNTR; - int pipeconf = I915_READ(pipeconf_reg); - int dspcntr = I915_READ(dspcntr_reg); - int dspbase_reg = (intel_crtc->plane == 0) ? - DSPAADDR : DSPBADDR; - /* Pipe must be off here */ - I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - - if (!IS_I9XX(dev)) { - /* Wait for vblank for the disable to take effect */ - intel_wait_for_vblank(dev); - } - - I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); - /* Wait for vblank for the disable to take effect. 
*/ - intel_wait_for_vblank(dev); - - /* Filter ctl must be set before TV_WIN_SIZE */ - I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1); - I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2); - I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3); - I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS); - I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE); - I915_WRITE(pipeconf_reg, pipeconf); - I915_WRITE(dspcntr_reg, dspcntr); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - } - - for (i = 0; i < 60; i++) - I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]); - for (i = 0; i < 60; i++) - I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]); - for (i = 0; i < 43; i++) - I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]); - for (i = 0; i < 43; i++) - I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]); - - I915_WRITE(TV_DAC, tv_priv->save_TV_DAC); - I915_WRITE(TV_CTL, tv_priv->save_TV_CTL); -} - static const struct tv_mode * intel_tv_mode_lookup (char *tv_format) { @@ -1068,9 +931,9 @@ intel_tv_mode_lookup (char *tv_format) } static const struct tv_mode * -intel_tv_mode_find (struct intel_output *intel_output) +intel_tv_mode_find (struct intel_encoder *intel_encoder) { - struct intel_tv_priv *tv_priv = intel_output->dev_priv; + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; return intel_tv_mode_lookup(tv_priv->tv_format); } @@ -1078,8 +941,9 @@ intel_tv_mode_find (struct intel_output *intel_output) static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_output *intel_output = to_intel_output(connector); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); /* Ensure TV refresh is close to desired refresh */ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) @@ -1095,8 +959,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, { struct drm_device *dev = encoder->dev; struct drm_mode_config *drm_config = &dev->mode_config; - struct intel_output *intel_output = enc_to_intel_output(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); struct drm_encoder *other_encoder; if (!tv_mode) @@ -1121,9 +985,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); u32 tv_ctl; u32 hctl1, hctl2, hctl3; u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; @@ -1360,9 +1224,9 @@ static const struct drm_display_mode reported_modes[] = { * \return false if TV is disconnected. 
*/ static int -intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) +intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) { - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = &intel_encoder->enc; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; @@ -1441,9 +1305,10 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) */ static void intel_tv_find_better_format(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); int i; if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == @@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector) { struct drm_crtc *crtc; struct drm_display_mode mode; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; int dpms_mode; int type = tv_priv->type; @@ -1485,12 +1350,14 @@ intel_tv_detect(struct drm_connector *connector) drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); if (encoder->crtc && encoder->crtc->enabled) { - type = intel_tv_detect_type(encoder->crtc, intel_output); + type = intel_tv_detect_type(encoder->crtc, intel_encoder); } else { - crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); + crtc = intel_get_load_detect_pipe(intel_encoder, connector, + &mode, &dpms_mode); if (crtc) { - type = intel_tv_detect_type(crtc, intel_output); - intel_release_load_detect_pipe(intel_output, dpms_mode); + type = intel_tv_detect_type(crtc, intel_encoder); + intel_release_load_detect_pipe(intel_encoder, connector, + dpms_mode); } else type = -1; } @@ -1525,8 +1392,9 @@ static void intel_tv_chose_preferred_modes(struct drm_connector *connector, struct drm_display_mode *mode_ptr) { - struct intel_output *intel_output = to_intel_output(connector); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; @@ -1550,12 +1418,13 @@ static int intel_tv_get_modes(struct drm_connector *connector) { struct drm_display_mode *mode_ptr; - struct intel_output *intel_output = to_intel_output(connector); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); int j, count = 0; u64 tmp; - for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); + for (j = 0; j < ARRAY_SIZE(input_res_table); 
j++) { struct input_res *input = &input_res_table[j]; unsigned int hactive_s = input->w; @@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector) static void intel_tv_destroy (struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(connector); } @@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop uint64_t val) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; struct drm_crtc *crtc = encoder->crtc; int ret = 0; bool changed = false; @@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { static const struct drm_connector_funcs intel_tv_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = intel_tv_save, - .restore = intel_tv_restore, .detect = intel_tv_detect, .destroy = intel_tv_destroy, .set_property = intel_tv_set_property, @@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, - .best_encoder = intel_best_encoder, + .best_encoder = intel_attached_encoder, }; static void intel_tv_enc_destroy(struct drm_encoder *encoder) { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static const struct drm_encoder_funcs intel_tv_enc_funcs = { @@ -1740,7 +1608,8 @@ intel_tv_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; struct intel_tv_priv *tv_priv; u32 tv_dac_on, tv_dac_off, save_tv_dac; char **tv_format_names; @@ -1780,28 +1649,34 @@ intel_tv_init(struct drm_device *dev) (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) return; - intel_output = kzalloc(sizeof(struct intel_output) + + intel_encoder = kzalloc(sizeof(struct intel_encoder) + sizeof(struct intel_tv_priv), GFP_KERNEL); - if (!intel_output) { + if (!intel_encoder) { + return; + } + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_encoder); return; } - connector = &intel_output->base; + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); - drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); - drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); - tv_priv = (struct intel_tv_priv *)(intel_output + 1); - intel_output->type = INTEL_OUTPUT_TVOUT; - intel_output->crtc_mask = (1 << 0) | (1 << 1); - intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); - intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); - intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); - intel_output->dev_priv = tv_priv; + 
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); + intel_encoder->type = INTEL_OUTPUT_TVOUT; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); + intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); + intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); + intel_encoder->dev_priv = tv_priv; tv_priv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ @@ -1812,7 +1687,7 @@ intel_tv_init(struct drm_device *dev) tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); - drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); connector->interlace_allowed = false; connector->doublescan_allowed = false; diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 32db806f3b5a..acd31ed861ef 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_dp.o nouveau_grctx.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ - nv04_fb.o nv10_fb.o nv40_fb.o \ + nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o \ @@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv50_cursor.o nv50_display.o nv50_fbcon.o \ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ - nv17_gpio.o + nv17_gpio.o nv50_gpio.o \ + nv50_calc.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 0e0730a53137..d4bcca8a5133 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -1,5 +1,6 @@ #include <linux/pci.h> #include <linux/acpi.h> +#include <linux/slab.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> @@ -33,7 +34,7 @@ static struct nouveau_dsm_priv { bool dsm_detected; acpi_handle dhandle; - acpi_handle dsm_handle; + acpi_handle rom_handle; } nouveau_dsm_priv; static const char nouveau_dsm_muid[] = { @@ -106,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) { if (id == VGA_SWITCHEROO_IGD) - return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); + return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); else - return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); + return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); } static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, @@ -117,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, if (id == VGA_SWITCHEROO_IGD) return 0; - return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); + return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } static int nouveau_dsm_init(void) @@ -150,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if 
(!dhandle) return false; + status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); if (ACPI_FAILURE(status)) { return false; } - ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, - NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); + ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED, + NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); if (ret < 0) return false; nouveau_dsm_priv.dhandle = dhandle; - nouveau_dsm_priv.dsm_handle = nvidia_handle; return true; } @@ -172,6 +173,7 @@ static bool nouveau_dsm_detect(void) struct pci_dev *pdev = NULL; int has_dsm = 0; int vga_count = 0; + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; @@ -179,7 +181,7 @@ static bool nouveau_dsm_detect(void) } if (vga_count == 2 && has_dsm) { - acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); + acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); nouveau_dsm_priv.dsm_detected = true; @@ -203,3 +205,57 @@ void nouveau_unregister_dsm_handler(void) { vga_switcheroo_unregister_handler(); } + +/* retrieve the ROM in 4k blocks */ +static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, + int offset, int len) +{ + acpi_status status; + union acpi_object rom_arg_elements[2], *obj; + struct acpi_object_list rom_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + rom_arg.count = 2; + rom_arg.pointer = &rom_arg_elements[0]; + + rom_arg_elements[0].type = ACPI_TYPE_INTEGER; + rom_arg_elements[0].integer.value = offset; + + rom_arg_elements[1].type = ACPI_TYPE_INTEGER; + rom_arg_elements[1].integer.value = len; + + status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); + return -ENODEV; + } + obj = (union acpi_object *)buffer.pointer; + memcpy(bios+offset, obj->buffer.pointer, len); + kfree(buffer.pointer); + return len; +} + +bool nouveau_acpi_rom_supported(struct pci_dev *pdev) +{ + acpi_status status; + acpi_handle dhandle, rom_handle; + + if (!nouveau_dsm_priv.dsm_detected) + return false; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "_ROM", &rom_handle); + if (ACPI_FAILURE(status)) + return false; + + nouveau_dsm_priv.rom_handle = rom_handle; + return true; +} + +int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) +{ + return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 75bceee76044..e492919faf44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -26,6 +26,7 @@ #define NV_DEBUG_NOTRACE #include "nouveau_drv.h" #include "nouveau_hw.h" +#include "nouveau_encoder.h" /* these defines are made up */ #define NV_CIO_CRE_44_HEADA 0x0 @@ -177,41 +178,51 @@ out: pci_disable_rom(dev->pdev); } +static void load_vbios_acpi(struct drm_device *dev, uint8_t *data) +{ + int i; + int ret; + int size = 64 * 1024; + + if (!nouveau_acpi_rom_supported(dev->pdev)) + return; + + for (i = 0; i < (size / ROM_BIOS_PAGE); i++) { + ret = nouveau_acpi_get_bios_chunk(data, + (i * ROM_BIOS_PAGE), + ROM_BIOS_PAGE); + if (ret <= 0) + break; + } + return; +} + struct methods { const char desc[8]; void (*loadbios)(struct drm_device *, uint8_t *); const bool rw; }; -static struct 
methods nv04_methods[] = { - { "PROM", load_vbios_prom, false }, - { "PRAMIN", load_vbios_pramin, true }, - { "PCIROM", load_vbios_pci, true }, -}; - -static struct methods nv50_methods[] = { +static struct methods shadow_methods[] = { { "PRAMIN", load_vbios_pramin, true }, { "PROM", load_vbios_prom, false }, { "PCIROM", load_vbios_pci, true }, + { "ACPI", load_vbios_acpi, true }, }; -#define METHODCNT 3 - static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct methods *methods; - int i; + const int nr_methods = ARRAY_SIZE(shadow_methods); + struct methods *methods = shadow_methods; int testscore = 3; - int scores[METHODCNT]; + int scores[nr_methods], i; if (nouveau_vbios) { - methods = nv04_methods; - for (i = 0; i < METHODCNT; i++) + for (i = 0; i < nr_methods; i++) if (!strcasecmp(nouveau_vbios, methods[i].desc)) break; - if (i < METHODCNT) { + if (i < nr_methods) { NV_INFO(dev, "Attempting to use BIOS image from %s\n", methods[i].desc); @@ -223,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); } - if (dev_priv->card_type < NV_50) - methods = nv04_methods; - else - methods = nv50_methods; - - for (i = 0; i < METHODCNT; i++) { + for (i = 0; i < nr_methods; i++) { NV_TRACE(dev, "Attempting to load BIOS image from %s\n", methods[i].desc); data[0] = data[1] = 0; /* avoid reuse of previous image */ @@ -239,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) } while (--testscore > 0) { - for (i = 0; i < METHODCNT; i++) { + for (i = 0; i < nr_methods; i++) { if (scores[i] == testscore) { NV_TRACE(dev, "Using BIOS image from %s\n", methods[i].desc); @@ -256,6 +262,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) struct init_tbl_entry { char *name; uint8_t id; + /* Return: + * > 0: success, length of opcode + * 0: success, but abort further parsing of table (INIT_DONE etc) + * < 0: failure, table parsing will be aborted + */ int (*handler)(struct nvbios *, uint16_t, struct init_exec *); }; @@ -709,6 +720,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev) return dcb_entry; } +static int +read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) +{ + uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; + int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; + int recordoffset = 0, rdofs = 1, wrofs = 0; + uint8_t port_type = 0; + + if (!i2ctable) + return -EINVAL; + + if (dcb_version >= 0x30) { + if (i2ctable[0] != dcb_version) /* necessary? 
*/ + NV_WARN(dev, + "DCB I2C table version mismatch (%02X vs %02X)\n", + i2ctable[0], dcb_version); + dcb_i2c_ver = i2ctable[0]; + headerlen = i2ctable[1]; + if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) + i2c_entries = i2ctable[2]; + else + NV_WARN(dev, + "DCB I2C table has more entries than indexable " + "(%d entries, max %d)\n", i2ctable[2], + DCB_MAX_NUM_I2C_ENTRIES); + entry_len = i2ctable[3]; + /* [4] is i2c_default_indices, read in parse_dcb_table() */ + } + /* + * It's your own fault if you call this function on a DCB 1.1 BIOS -- + * the test below is for DCB 1.2 + */ + if (dcb_version < 0x14) { + recordoffset = 2; + rdofs = 0; + wrofs = 1; + } + + if (index == 0xf) + return 0; + if (index >= i2c_entries) { + NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", + index, i2ctable[2]); + return -ENOENT; + } + if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { + NV_ERROR(dev, "DCB I2C entry invalid\n"); + return -EINVAL; + } + + if (dcb_i2c_ver >= 0x30) { + port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; + + /* + * Fixup for chips using same address offset for read and + * write. + */ + if (port_type == 4) /* seen on C51 */ + rdofs = wrofs = 1; + if (port_type >= 5) /* G80+ */ + rdofs = wrofs = 0; + } + + if (dcb_i2c_ver >= 0x40) { + if (port_type != 5 && port_type != 6) + NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); + + i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]); + } + + i2c->port_type = port_type; + i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; + i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; + + return 0; +} + static struct nouveau_i2c_chan * init_i2c_device_find(struct drm_device *dev, int i2c_index) { @@ -727,6 +815,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index) } if (i2c_index == 0x80) /* g80+ */ i2c_index = dcb->i2c_default_indices & 0xf; + else + if (i2c_index == 0x81) + i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4; + + if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) { + NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index); + return NULL; + } + + /* Make sure i2c table entry has been parsed, it may not + * have been if this is a bus not referenced by a DCB encoder + */ + read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table, + i2c_index, &dcb->i2c[i2c_index]); return nouveau_i2c_find(dev, i2c_index); } @@ -818,7 +920,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); - return 0; + return -EINVAL; } configval = ROM32(bios->data[offset + 11 + config * 4]); @@ -920,7 +1022,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); - return 0; + return -EINVAL; } freq = ROM16(bios->data[offset + 12 + config * 2]); @@ -1067,6 +1169,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset, } static int +init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_DP_CONDITION opcode: 0x3A ('') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): "sub" opcode + * offset + 2 (8 bit): unknown + * + */ + + struct bit_displayport_encoder_table *dpe = NULL; + struct dcb_entry *dcb = bios->display.output; + struct drm_device *dev = bios->dev; + uint8_t cond = bios->data[offset + 1]; + int dummy; + + BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond); + + if 
(!iexec->execute) + return 3; + + dpe = nouveau_bios_dp_table(dev, dcb, &dummy); + if (!dpe) { + NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset); + return -EINVAL; + } + + switch (cond) { + case 0: + { + struct dcb_connector_table_entry *ent = + &bios->dcb.connector.entry[dcb->connector]; + + if (ent->type != DCB_CONNECTOR_eDP) + iexec->execute = false; + } + break; + case 1: + case 2: + if (!(dpe->unknown & cond)) + iexec->execute = false; + break; + case 5: + { + struct nouveau_i2c_chan *auxch; + int ret; + + auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index); + if (!auxch) + return -ENODEV; + + ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1); + if (ret) + return ret; + + if (cond & 1) + iexec->execute = false; + } + break; + default: + NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond); + break; + } + + if (iexec->execute) + BIOSLOG(bios, "0x%04X: continuing to execute\n", offset); + else + BIOSLOG(bios, "0x%04X: skipping following commands\n", offset); + + return 3; +} + +static int +init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_3B opcode: 0x3B ('') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): crtc index + * + */ + + uint8_t or = ffs(bios->display.output->or) - 1; + uint8_t index = bios->data[offset + 1]; + uint8_t data; + + if (!iexec->execute) + return 2; + + data = bios_idxprt_rd(bios, 0x3d4, index); + bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or)); + return 2; +} + +static int +init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_3C opcode: 0x3C ('') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): crtc index + * + */ + + uint8_t or = ffs(bios->display.output->or) - 1; + uint8_t index = bios->data[offset + 1]; + uint8_t data; + + if (!iexec->execute) + return 2; + + data = bios_idxprt_rd(bios, 0x3d4, index); + bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or)); + return 2; +} + +static int init_idx_addr_latched(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) { @@ -1170,7 +1392,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); - return 0; + return -EINVAL; } freq = ROM32(bios->data[offset + 11 + config * 4]); @@ -1231,12 +1453,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) */ uint8_t i2c_index = bios->data[offset + 1]; - uint8_t i2c_address = bios->data[offset + 2]; + uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; - int len = 4 + count * 3; struct nouveau_i2c_chan *chan; - struct i2c_msg msg; - int i; + int len = 4 + count * 3; + int ret, i; if (!iexec->execute) return len; @@ -1247,35 +1468,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) chan = init_i2c_device_find(bios->dev, i2c_index); if (!chan) - return 0; + return -ENODEV; for (i = 0; i < count; i++) { - uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; + uint8_t reg = bios->data[offset + 4 + i * 3]; uint8_t mask = bios->data[offset + 5 + i * 3]; uint8_t data = bios->data[offset + 6 + i * 3]; - uint8_t value; + union i2c_smbus_data val; - msg.addr = i2c_address; - msg.flags = I2C_M_RD; - msg.len = 1; - msg.buf = &value; - if (i2c_transfer(&chan->adapter, &msg, 1) != 1) - return 0; + ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, + I2C_SMBUS_READ, reg, + I2C_SMBUS_BYTE_DATA, &val); + if (ret < 0) + return ret; BIOSLOG(bios, "0x%04X: 
I2CReg: 0x%02X, Value: 0x%02X, " "Mask: 0x%02X, Data: 0x%02X\n", - offset, i2c_reg, value, mask, data); + offset, reg, val.byte, mask, data); - value = (value & mask) | data; + if (!bios->execute) + continue; - if (bios->execute) { - msg.addr = i2c_address; - msg.flags = 0; - msg.len = 1; - msg.buf = &value; - if (i2c_transfer(&chan->adapter, &msg, 1) != 1) - return 0; - } + val.byte &= mask; + val.byte |= data; + ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, + I2C_SMBUS_WRITE, reg, + I2C_SMBUS_BYTE_DATA, &val); + if (ret < 0) + return ret; } return len; @@ -1301,12 +1521,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) */ uint8_t i2c_index = bios->data[offset + 1]; - uint8_t i2c_address = bios->data[offset + 2]; + uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; - int len = 4 + count * 2; struct nouveau_i2c_chan *chan; - struct i2c_msg msg; - int i; + int len = 4 + count * 2; + int ret, i; if (!iexec->execute) return len; @@ -1317,23 +1536,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) chan = init_i2c_device_find(bios->dev, i2c_index); if (!chan) - return 0; + return -ENODEV; for (i = 0; i < count; i++) { - uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; - uint8_t data = bios->data[offset + 5 + i * 2]; + uint8_t reg = bios->data[offset + 4 + i * 2]; + union i2c_smbus_data val; + + val.byte = bios->data[offset + 5 + i * 2]; BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n", - offset, i2c_reg, data); - - if (bios->execute) { - msg.addr = i2c_address; - msg.flags = 0; - msg.len = 1; - msg.buf = &data; - if (i2c_transfer(&chan->adapter, &msg, 1) != 1) - return 0; - } + offset, reg, val.byte); + + if (!bios->execute) + continue; + + ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, + I2C_SMBUS_WRITE, reg, + I2C_SMBUS_BYTE_DATA, &val); + if (ret < 0) + return ret; } return len; @@ -1357,7 +1578,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) */ uint8_t i2c_index = bios->data[offset + 1]; - uint8_t i2c_address = bios->data[offset + 2]; + uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; int len = 4 + count; struct nouveau_i2c_chan *chan; @@ -1374,7 +1595,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) chan = init_i2c_device_find(bios->dev, i2c_index); if (!chan) - return 0; + return -ENODEV; for (i = 0; i < count; i++) { data[i] = bios->data[offset + 4 + i]; @@ -1388,7 +1609,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) msg.len = count; msg.buf = data; if (i2c_transfer(&chan->adapter, &msg, 1) != 1) - return 0; + return -EIO; } return len; @@ -1427,7 +1648,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) reg = get_tmds_index_reg(bios->dev, mlv); if (!reg) - return 0; + return -EINVAL; bios_wr32(bios, reg, tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); @@ -1471,7 +1692,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, reg = get_tmds_index_reg(bios->dev, mlv); if (!reg) - return 0; + return -EINVAL; for (i = 0; i < count; i++) { uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; @@ -1946,7 +2167,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, uint32_t reg, data; if (bios->major_version > 2) - return 0; + return -ENODEV; bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); @@ -2001,7 +2222,7 @@ 
init_configure_clk(struct nvbios *bios, uint16_t offset, int clock; if (bios->major_version > 2) - return 0; + return -ENODEV; clock = ROM16(bios->data[meminitoffs + 4]) * 10; setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); @@ -2034,7 +2255,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); if (bios->major_version > 2) - return 0; + return -ENODEV; bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX, cr3c); @@ -2573,48 +2794,37 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * each GPIO according to various values listed in each entry */ - const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; - const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr]; - const uint8_t *gpio_entry; int i; - if (!iexec->execute) - return 1; - - if (bios->dcb.version != 0x40) { - NV_ERROR(bios->dev, "DCB table not version 4.0\n"); - return 0; - } - - if (!bios->dcb.gpio_table_ptr) { - NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); - return 0; + if (dev_priv->card_type != NV_50) { + NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); + return -ENODEV; } - gpio_entry = gpio_table + gpio_table[1]; - for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) { - uint32_t entry = ROM32(gpio_entry[0]), r, s, v; - int line = (entry & 0x0000001f); + if (!iexec->execute) + return 1; - BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry); + for (i = 0; i < bios->dcb.gpio.entries; i++) { + struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; + uint32_t r, s, v; - if ((entry & 0x0000ff00) == 0x0000ff00) - continue; + BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); - r = nv50_gpio_reg[line >> 3]; - s = (line & 0x07) << 2; - v = bios_rd32(bios, r) & ~(0x00000003 << s); - if (entry & 0x01000000) - v |= (((entry & 0x60000000) >> 29) ^ 2) << s; - else - v |= (((entry & 0x18000000) >> 27) ^ 2) << s; - bios_wr32(bios, r, v); + BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", + offset, gpio->tag, gpio->state_default); + if (bios->execute) + nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); - r = nv50_gpio_ctl[line >> 4]; - s = (line & 0x0f); + /* The NVIDIA binary driver doesn't appear to actually do + * any of this, my VBIOS does however. 
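
For reference, the "de-magicing" the comment below asks for is at least structurally simple: the control-register lookup is a plain split of the GPIO line number. A minimal stand-alone sketch (helper names are hypothetical; the register addresses and mask are taken from this hunk):

#include <stdint.h>

/* Each of the two NV50 GPIO control registers covers 16 lines, with a
 * paired low/high bit per line; hence the 0x00010001 mask below. */
static uint32_t nv50_gpio_ctl_reg(int line)
{
	static const uint32_t ctl[2] = { 0xe100, 0xe28c };
	return ctl[line >> 4];			/* 16 lines per register */
}

static uint32_t nv50_gpio_ctl_mask(int line)
{
	return UINT32_C(0x00010001) << (line & 0x0f);
}
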
+ */ + /* Not a clue, needs de-magicing */ + r = nv50_gpio_ctl[gpio->line >> 4]; + s = (gpio->line & 0x0f); v = bios_rd32(bios, r) & ~(0x00010001 << s); - switch ((entry & 0x06000000) >> 25) { + switch ((gpio->entry & 0x06000000) >> 25) { case 1: v |= (0x00000001 << s); break; @@ -2670,7 +2880,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Zero block length - has the M table " "been parsed?\n", offset); - return 0; + return -EINVAL; } strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; @@ -2854,14 +3064,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) if (!bios->display.output) { NV_ERROR(dev, "INIT_AUXCH: no active output\n"); - return 0; + return -EINVAL; } auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); if (!auxch) { NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", bios->display.output->i2c_index); - return 0; + return -ENODEV; } if (!iexec->execute) @@ -2874,7 +3084,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); if (ret) { NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); - return 0; + return ret; } data &= bios->data[offset + 0]; @@ -2883,7 +3093,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); if (ret) { NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); - return 0; + return ret; } } @@ -2913,14 +3123,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) if (!bios->display.output) { NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); - return 0; + return -EINVAL; } auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); if (!auxch) { NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", bios->display.output->i2c_index); - return 0; + return -ENODEV; } if (!iexec->execute) @@ -2931,7 +3141,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); if (ret) { NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); - return 0; + return ret; } } @@ -2948,6 +3158,9 @@ static struct init_tbl_entry itbl_entry[] = { { "INIT_COPY" , 0x37, init_copy }, { "INIT_NOT" , 0x38, init_not }, { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition }, + { "INIT_DP_CONDITION" , 0x3A, init_dp_condition }, + { "INIT_OP_3B" , 0x3B, init_op_3b }, + { "INIT_OP_3C" , 0x3C, init_op_3c }, { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched }, { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 }, { "INIT_PLL2" , 0x4B, init_pll2 }, @@ -3015,7 +3228,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset, * is changed back to EXECUTE. */ - int count = 0, i, res; + int count = 0, i, ret; uint8_t id; /* @@ -3030,26 +3243,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset, for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++) ; - if (itbl_entry[i].name) { - BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", - offset, itbl_entry[i].id, itbl_entry[i].name); - - /* execute eventual command handler */ - res = (*itbl_entry[i].handler)(bios, offset, iexec); - if (!res) - break; - /* - * Add the offset of the current command including all data - * of that command. The offset will then be pointing on the - * next op code. 
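
Worth noting for the parse_init_table() rework in this hunk: the handler contract changes from "zero return means stop" to "negative means error (now logged), zero means stop, positive means bytes consumed". A toy stand-alone version of the resulting loop, with made-up opcode ids and handlers:

#include <stdint.h>
#include <stddef.h>

/* Handlers return bytes consumed (>0), 0 to end the table, <0 on error. */
static int op_nop(const uint8_t *cmd)  { (void)cmd; return 1; }
static int op_stop(const uint8_t *cmd) { (void)cmd; return 0; }

static int run_table(const uint8_t *tbl, size_t offset, size_t len)
{
	while (offset < len) {
		int ret;

		switch (tbl[offset]) {
		case 0x90: ret = op_nop(&tbl[offset]); break;	/* made up */
		case 0x91: ret = op_stop(&tbl[offset]); break;	/* made up */
		default:   return -1;	/* unknown opcode */
		}

		if (ret < 0)
			return ret;	/* handler failed: propagate */
		if (ret == 0)
			break;		/* explicit terminator */
		offset += ret;		/* skip opcode plus its payload */
	}
	return 0;
}
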
- */ - offset += res; - } else { + if (!itbl_entry[i].name) { NV_ERROR(bios->dev, "0x%04X: Init table command not found: " "0x%02X\n", offset, id); return -ENOENT; } + + BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset, + itbl_entry[i].id, itbl_entry[i].name); + + /* execute eventual command handler */ + ret = (*itbl_entry[i].handler)(bios, offset, iexec); + if (ret < 0) { + NV_ERROR(bios->dev, "0x%04X: Failed parsing init " + "table opcode: %s %d\n", offset, + itbl_entry[i].name, ret); + } + + if (ret <= 0) + break; + + /* + * Add the offset of the current command including all data + * of that command. The offset will then be pointing on the + * next op code. + */ + offset += ret; } if (offset >= bios->length) @@ -3198,7 +3418,6 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int struct nvbios *bios = &dev_priv->vbios; unsigned int outputset = (dcbent->or == 4) ? 1 : 0; uint16_t scriptptr = 0, clktable; - uint8_t clktableptr = 0; /* * For now we assume version 3.0 table - g80 support will need some @@ -3217,26 +3436,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); break; case LVDS_RESET: + clktable = bios->fp.lvdsmanufacturerpointer + 15; + if (dcbent->or == 4) + clktable += 8; + if (dcbent->lvdsconf.use_straps_for_mode) { if (bios->fp.dual_link) - clktableptr += 2; - if (bios->fp.BITbit1) - clktableptr++; + clktable += 4; + if (bios->fp.if_is_24bit) + clktable += 2; } else { /* using EDID */ - uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; - int fallbackcmpval = (dcbent->or == 4) ? 4 : 1; + int cmpval_24bit = (dcbent->or == 4) ? 4 : 1; if (bios->fp.dual_link) { - clktableptr += 2; - fallbackcmpval *= 2; + clktable += 4; + cmpval_24bit <<= 1; } - if (fallbackcmpval & fallback) - clktableptr++; + + if (bios->fp.strapless_is_24bit & cmpval_24bit) + clktable += 2; } - /* adding outputset * 8 may not be correct */ - clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]); + clktable = ROM16(bios->data[clktable]); if (!clktable) { NV_ERROR(dev, "Pixel clock comparison table not found\n"); return -ENOENT; @@ -3638,37 +3860,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b *if_is_24bit = bios->data[lvdsofs] & 16; break; case 0x30: - /* - * My money would be on there being a 24 bit interface bit in - * this table, but I have no example of a laptop bios with a - * 24 bit panel to confirm that. 
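
Stepping back to the LVDS_RESET change above: the old clktableptr indexing is replaced by direct byte offsets from lvdsmanufacturerpointer. The decision tree, as a stand-alone helper (illustrative only):

/* Base is lvdsmanufacturerpointer + 15; +8 selects the second output set
 * (or == 4), +4 dual link, +2 a 24-bit interface. The result points at a
 * 16-bit pointer to the pixel clock comparison table. */
static unsigned lvds_clktable_offset(unsigned lvdsmanufacturerpointer,
				     int output_or, int dual_link, int is_24bit)
{
	unsigned offset = lvdsmanufacturerpointer + 15;

	if (output_or == 4)
		offset += 8;
	if (dual_link)
		offset += 4;
	if (is_24bit)
		offset += 2;
	return offset;
}
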
Hence we shout loudly if any - * bit other than bit 0 is set (I've not even seen bit 1) - */ - if (bios->data[lvdsofs] > 1) - NV_ERROR(dev, - "You have a very unusual laptop display; please report it\n"); + case 0x40: /* * No sign of the "power off for reset" or "reset for panel * on" bits, but it's safer to assume we should */ bios->fp.power_off_for_reset = true; bios->fp.reset_after_pclk_change = true; + /* * It's ok lvdsofs is wrong for nv4x edid case; dual_link is - * over-written, and BITbit1 isn't used + * over-written, and if_is_24bit isn't used */ bios->fp.dual_link = bios->data[lvdsofs] & 1; - bios->fp.BITbit1 = bios->data[lvdsofs] & 2; - bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; - break; - case 0x40: - bios->fp.dual_link = bios->data[lvdsofs] & 1; bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; break; } + /* Dell Latitude D620 reports a too-high value for the dual-link + * transition freq, causing us to program the panel incorrectly. + * + * It doesn't appear the VBIOS actually uses its transition freq + * (90000kHz), instead it uses the "Number of LVDS channels" field + * out of the panel ID structure (http://www.spwg.org/). + * + * For the moment, a quirk will do :) + */ + if ((dev->pdev->device == 0x01d7) && + (dev->pdev->subsystem_vendor == 0x1028) && + (dev->pdev->subsystem_device == 0x01c2)) { + bios->fp.duallink_transition_clk = 80000; + } + /* set dual_link flag for EDID case */ if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); @@ -3680,7 +3905,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b static uint8_t * bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, - uint16_t record, int record_len, int record_nr) + uint16_t record, int record_len, int record_nr, + bool match_link) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvbios *bios = &dev_priv->vbios; @@ -3688,12 +3914,28 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, uint16_t table; int i, v; + switch (dcbent->type) { + case OUTPUT_TMDS: + case OUTPUT_LVDS: + case OUTPUT_DP: + break; + default: + match_link = false; + break; + } + for (i = 0; i < record_nr; i++, record += record_len) { table = ROM16(bios->data[record]); if (!table) continue; entry = ROM32(bios->data[table]); + if (match_link) { + v = (entry & 0x00c00000) >> 22; + if (!(v & dcbent->sorconf.link)) + continue; + } + v = (entry & 0x000f0000) >> 16; if (!(v & dcbent->or)) continue; @@ -3735,7 +3977,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, *length = table[4]; return bios_output_config_match(dev, dcbent, bios->display.dp_table_ptr + table[1], - table[2], table[3]); + table[2], table[3], table[0] >= 0x21); } int @@ -3824,7 +4066,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, dcbent->type, dcbent->location, dcbent->or); otable = bios_output_config_match(dev, dcbent, table[1] + bios->display.script_table_ptr, - table[2], table[3]); + table[2], table[3], table[0] >= 0x21); if (!otable) { NV_ERROR(dev, "Couldn't find matching output script table\n"); return 1; @@ -4294,31 +4536,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims break; } -#if 0 
/* for easy debugging */ - ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); - ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); - ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); - ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); - - ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); - ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); - ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); - ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); - - ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); - ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); - ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); - ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); - ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); - ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); - ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); - ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); - - ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p); - ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias); - - ErrorF("pll.refclk: %d\n", pll_lim->refclk); -#endif + NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); + NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); + NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); + NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); + NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); + NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); + NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); + NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); + if (pll_lim->vco2.maxfreq) { + NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); + NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); + NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); + NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); + NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); + NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); + NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); + NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); + } + if (!pll_lim->max_p) { + NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p); + NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias); + } else { + NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p); + NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p); + } + NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk); return 0; } @@ -4962,79 +5205,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) return 0; } -static int -read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) -{ - uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; - int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; - int recordoffset = 0, rdofs = 1, wrofs = 0; - uint8_t port_type = 0; - - if (!i2ctable) - return -EINVAL; - - if (dcb_version >= 0x30) { - if (i2ctable[0] != dcb_version) /* necessary? 
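
The read_dcb_i2c_entry() deleted here is the same function this patch re-adds earlier in the file (with DCB 4.0 handling); its table walk is plain strided indexing. A minimal illustration, assuming headerlen, recordoffset and the rdofs/wrofs byte offsets have already been decoded from the table header:

#include <stdint.h>
#include <stddef.h>

/* Entry i starts at headerlen + entry_len * i; recordoffset adjusts for
 * pre-DCB-1.2 layouts, and byte_ofs selects the read or write address
 * byte within the entry. */
static uint8_t dcb_i2c_byte(const uint8_t *i2ctable, size_t headerlen,
			    size_t recordoffset, size_t byte_ofs,
			    size_t entry_len, int index)
{
	return i2ctable[headerlen + recordoffset + byte_ofs +
			entry_len * index];
}

/* read address:  dcb_i2c_byte(tbl, hdr, rec, rdofs, entry_len, idx)
 * write address: dcb_i2c_byte(tbl, hdr, rec, wrofs, entry_len, idx) */
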
*/ - NV_WARN(dev, - "DCB I2C table version mismatch (%02X vs %02X)\n", - i2ctable[0], dcb_version); - dcb_i2c_ver = i2ctable[0]; - headerlen = i2ctable[1]; - if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) - i2c_entries = i2ctable[2]; - else - NV_WARN(dev, - "DCB I2C table has more entries than indexable " - "(%d entries, max %d)\n", i2ctable[2], - DCB_MAX_NUM_I2C_ENTRIES); - entry_len = i2ctable[3]; - /* [4] is i2c_default_indices, read in parse_dcb_table() */ - } - /* - * It's your own fault if you call this function on a DCB 1.1 BIOS -- - * the test below is for DCB 1.2 - */ - if (dcb_version < 0x14) { - recordoffset = 2; - rdofs = 0; - wrofs = 1; - } - - if (index == 0xf) - return 0; - if (index >= i2c_entries) { - NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", - index, i2ctable[2]); - return -ENOENT; - } - if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { - NV_ERROR(dev, "DCB I2C entry invalid\n"); - return -EINVAL; - } - - if (dcb_i2c_ver >= 0x30) { - port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; - - /* - * Fixup for chips using same address offset for read and - * write. - */ - if (port_type == 4) /* seen on C51 */ - rdofs = wrofs = 1; - if (port_type >= 5) /* G80+ */ - rdofs = wrofs = 0; - } - - if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6) - NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); - - i2c->port_type = port_type; - i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; - i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; - - return 0; -} - static struct dcb_gpio_entry * new_gpio_entry(struct nvbios *bios) { @@ -5077,25 +5247,25 @@ parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset) gpio->tag = tag; gpio->line = line; gpio->invert = flags != 4; + gpio->entry = ent; } static void parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) { + uint32_t entry = ROM32(bios->data[offset]); struct dcb_gpio_entry *gpio; - uint32_t ent = ROM32(bios->data[offset]); - uint8_t line = ent & 0x1f, - tag = ent >> 8 & 0xff; - if (tag == 0xff) + if ((entry & 0x0000ff00) == 0x0000ff00) return; gpio = new_gpio_entry(bios); - - /* Currently unused, we may need more fields parsed at some - * point. 
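
The replacement parsing that follows pulls the DCB 4.0 GPIO fields straight out of the 32-bit entry word; decoded in isolation (struct and function names are illustrative):

#include <stdint.h>

struct gpio_fields {
	uint8_t tag;		/* bits 8..15; 0xff marks an unused entry */
	uint8_t line;		/* bits 0..4 */
	uint8_t state_default;	/* bit 24 */
	uint8_t state0;		/* bits 27..28 */
	uint8_t state1;		/* bits 29..30 */
};

static struct gpio_fields decode_dcb40_gpio(uint32_t entry)
{
	struct gpio_fields g = {
		.tag		= (entry & 0x0000ff00) >> 8,
		.line		= (entry & 0x0000001f) >> 0,
		.state_default	= (entry & 0x01000000) >> 24,
		.state0		= (entry & 0x18000000) >> 27,
		.state1		= (entry & 0x60000000) >> 29,
	};
	return g;
}
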
*/ - gpio->tag = tag; - gpio->line = line; + gpio->tag = (entry & 0x0000ff00) >> 8; + gpio->line = (entry & 0x0000001f) >> 0; + gpio->state_default = (entry & 0x01000000) >> 24; + gpio->state[0] = (entry & 0x18000000) >> 27; + gpio->state[1] = (entry & 0x60000000) >> 29; + gpio->entry = entry; } static void @@ -5211,6 +5381,21 @@ divine_connector_type(struct nvbios *bios, int index) } static void +apply_dcb_connector_quirks(struct nvbios *bios, int idx) +{ + struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx]; + struct drm_device *dev = bios->dev; + + /* Gigabyte NX85T */ + if ((dev->pdev->device == 0x0421) && + (dev->pdev->subsystem_vendor == 0x1458) && + (dev->pdev->subsystem_device == 0x344c)) { + if (cte->type == DCB_CONNECTOR_HDMI_1) + cte->type = DCB_CONNECTOR_DVI_I; + } +} + +static void parse_dcb_connector_table(struct nvbios *bios) { struct drm_device *dev = bios->dev; @@ -5238,13 +5423,14 @@ parse_dcb_connector_table(struct nvbios *bios) entry = conntab + conntab[1]; cte = &ct->entry[0]; for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { + cte->index = i; if (conntab[3] == 2) cte->entry = ROM16(entry[0]); else cte->entry = ROM32(entry[0]); cte->type = (cte->entry & 0x000000ff) >> 0; - cte->index = (cte->entry & 0x00000f00) >> 8; + cte->index2 = (cte->entry & 0x00000f00) >> 8; switch (cte->entry & 0x00033000) { case 0x00001000: cte->gpio_tag = 0x07; @@ -5266,6 +5452,8 @@ parse_dcb_connector_table(struct nvbios *bios) if (cte->type == 0xff) continue; + apply_dcb_connector_quirks(bios, i); + NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", i, cte->entry, cte->type, cte->index, cte->gpio_tag); @@ -5287,10 +5475,16 @@ parse_dcb_connector_table(struct nvbios *bios) break; default: cte->type = divine_connector_type(bios, cte->index); - NV_WARN(dev, "unknown type, using 0x%02x", cte->type); + NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type); break; } + if (nouveau_override_conntype) { + int type = divine_connector_type(bios, cte->index); + if (type != cte->type) + NV_WARN(dev, " -> type 0x%02x\n", cte->type); + } + } } @@ -5364,12 +5558,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, entry->bus = (conn >> 16) & 0xf; entry->location = (conn >> 20) & 0x3; entry->or = (conn >> 24) & 0xf; - /* - * Normal entries consist of a single bit, but dual link has the - * next most significant bit set too - */ - entry->duallink_possible = - ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); switch (entry->type) { case OUTPUT_ANALOG: @@ -5453,6 +5641,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, break; } + if (dcb->version < 0x40) { + /* Normal entries consist of a single bit, but dual link has + * the next most significant bit set too + */ + entry->duallink_possible = + ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); + } else { + entry->duallink_possible = (entry->sorconf.link == 3); + } + /* unsure what DCB version introduces this, 3.0? 
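
The pre-DCB-4.0 dual-link test a few lines up packs "the OR mask is exactly two adjacent bits" into one multiply; spelled out as a sketch:

#include <stdbool.h>
#include <strings.h>	/* ffs() */

/* The lowest set bit times 3 sets that bit plus the next one up, so the
 * equality holds only for masks such as 0x3, 0x6 or 0xc. */
static bool duallink_possible(unsigned or_mask)
{
	return or_mask && (1u << (ffs(or_mask) - 1)) * 3 == or_mask;
}

/* 0x3 -> true, 0x1 -> false, 0x5 -> false; DCB 4.0+ entries instead
 * test sorconf.link == 3 directly, as in the hunk above. */
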
*/ if (conf & 0x100000) entry->i2c_upper_default = true; @@ -6036,6 +6234,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev) nouveau_i2c_fini(dev, entry); } +static bool +nouveau_bios_posted(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + bool was_locked; + unsigned htotal; + + if (dev_priv->chipset >= NV_50) { + if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && + NVReadVgaCrtc(dev, 0, 0x1a) == 0) + return false; + return true; + } + + was_locked = NVLockVgaCrtcs(dev, false); + htotal = NVReadVgaCrtc(dev, 0, 0x06); + htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; + htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; + htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; + htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; + NVLockVgaCrtcs(dev, was_locked); + return (htotal != 0); +} + int nouveau_bios_init(struct drm_device *dev) { @@ -6070,11 +6292,9 @@ nouveau_bios_init(struct drm_device *dev) bios->execute = false; /* ... unless card isn't POSTed already */ - if (dev_priv->card_type >= NV_10 && - NVReadVgaCrtc(dev, 0, 0x00) == 0 && - NVReadVgaCrtc(dev, 0, 0x1a) == 0) { + if (!nouveau_bios_posted(dev)) { NV_INFO(dev, "Adaptor not initialised\n"); - if (dev_priv->card_type < NV_50) { + if (dev_priv->card_type < NV_40) { NV_ERROR(dev, "Unable to POST this chipset\n"); return -ENODEV; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 9f688aa9a655..adf4ec2d06c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -35,6 +35,7 @@ #define DCB_LOC_ON_CHIP 0 struct dcb_i2c_entry { + uint32_t entry; uint8_t port_type; uint8_t read, write; struct nouveau_i2c_chan *chan; @@ -49,6 +50,9 @@ struct dcb_gpio_entry { enum dcb_gpio_tag tag; int line; bool invert; + uint32_t entry; + uint8_t state_default; + uint8_t state[2]; }; struct dcb_gpio_table { @@ -72,9 +76,10 @@ enum dcb_connector_type { }; struct dcb_connector_table_entry { + uint8_t index; uint32_t entry; enum dcb_connector_type type; - uint8_t index; + uint8_t index2; uint8_t gpio_tag; }; @@ -266,7 +271,6 @@ struct nvbios { bool reset_after_pclk_change; bool dual_link; bool link_c_increment; - bool BITbit1; bool if_is_24bit; int duallink_transition_clk; uint8_t strapless_is_24bit; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 028719fddf76..6f3c19522377 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -34,6 +34,7 @@ #include "nouveau_dma.h" #include <linux/log2.h> +#include <linux/slab.h> static void nouveau_bo_del_ttm(struct ttm_buffer_object *bo) @@ -71,7 +72,7 @@ nouveau_bo_fixup_align(struct drm_device *dev, * many small buffers. */ if (dev_priv->card_type == NV_50) { - uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; + uint32_t block_size = dev_priv->vram_size >> 15; int i; switch (tile_flags) { @@ -153,17 +154,17 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nvbo->placement.fpfn = 0; nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; - nouveau_bo_placement_set(nvbo, flags); + nouveau_bo_placement_set(nvbo, flags, 0); nvbo->channel = chan; ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, ttm_bo_type_device, &nvbo->placement, align, 0, false, NULL, size, nouveau_bo_del_ttm); - nvbo->channel = NULL; if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. 
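
As an aside on the POST check added above: nouveau_bios_posted() reconstructs the 12-bit horizontal total from bits scattered over four VGA CRTC registers. The reassembly as a pure function (register values passed in for illustration):

#include <stdint.h>

/* cr06 carries bits 0..7; cr07 bits 0x01 and 0x20 map to bits 8 and 9;
 * cr25 bit 0 is bit 10 and cr41 bit 0 is bit 11. A zero total means the
 * CRTC was never programmed, i.e. the card was not POSTed. */
static unsigned vga_htotal(uint8_t cr06, uint8_t cr07,
			   uint8_t cr25, uint8_t cr41)
{
	unsigned htotal = cr06;

	htotal |= (cr07 & 0x01) << 8;
	htotal |= (cr07 & 0x20) << 4;	/* 0x20 << 4 lands on bit 9 */
	htotal |= (cr25 & 0x01) << 10;
	htotal |= (cr41 & 0x01) << 11;
	return htotal;
}
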
*/ return ret; } + nvbo->channel = NULL; spin_lock(&dev_priv->ttm.bo_list_lock); list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list); @@ -172,26 +173,33 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, return 0; } +static void +set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) +{ + *n = 0; + + if (type & TTM_PL_FLAG_VRAM) + pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; + if (type & TTM_PL_FLAG_TT) + pl[(*n)++] = TTM_PL_FLAG_TT | flags; + if (type & TTM_PL_FLAG_SYSTEM) + pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; +} + void -nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) { - int n = 0; - - if (memtype & TTM_PL_FLAG_VRAM) - nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; - if (memtype & TTM_PL_FLAG_TT) - nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; - if (memtype & TTM_PL_FLAG_SYSTEM) - nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; - nvbo->placement.placement = nvbo->placements; - nvbo->placement.busy_placement = nvbo->placements; - nvbo->placement.num_placement = n; - nvbo->placement.num_busy_placement = n; - - if (nvbo->pin_refcnt) { - while (n--) - nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT; - } + struct ttm_placement *pl = &nvbo->placement; + uint32_t flags = TTM_PL_MASK_CACHING | + (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); + + pl->placement = nvbo->placements; + set_placement_list(nvbo->placements, &pl->num_placement, + type, flags); + + pl->busy_placement = nvbo->busy_placements; + set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, + type | busy, flags); } int @@ -199,7 +207,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret, i; + int ret; if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { NV_ERROR(nouveau_bdev(bo->bdev)->dev, @@ -215,11 +223,9 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) if (ret) goto out; - nouveau_bo_placement_set(nvbo, memtype); - for (i = 0; i < nvbo->placement.num_placement; i++) - nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + nouveau_bo_placement_set(nvbo, memtype, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false); + ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -244,7 +250,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret, i; + int ret; if (--nvbo->pin_refcnt) return 0; @@ -253,10 +259,9 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) if (ret) return ret; - for (i = 0; i < nvbo->placement.num_placement; i++) - nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false); + ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -386,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: man->flags = TTM_MEMTYPE_FLAG_FIXED | - TTM_MEMTYPE_FLAG_MAPPABLE | - TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; + TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - - man->io_addr = NULL; - 
man->io_offset = drm_get_resource_start(dev, 1); - man->io_size = drm_get_resource_len(dev, 1); - if (man->io_size > nouveau_mem_fb_amount(dev)) - man->io_size = nouveau_mem_fb_amount(dev); - man->gpu_offset = dev_priv->vm_vram_base; break; case TTM_PL_TT: switch (dev_priv->gart_info.type) { case NOUVEAU_GART_AGP: - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | - TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED; man->default_caching = TTM_PL_FLAG_UNCACHED; break; @@ -419,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, dev_priv->gart_info.type); return -EINVAL; } - - man->io_offset = dev_priv->gart_info.aper_base; - man->io_size = dev_priv->gart_info.aper_size; - man->io_addr = NULL; man->gpu_offset = dev_priv->vm_gart_base; break; default: @@ -439,11 +431,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) switch (bo->mem.mem_type) { case TTM_PL_VRAM: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT | + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, TTM_PL_FLAG_SYSTEM); break; default: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); break; } @@ -457,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) static int nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, - struct nouveau_bo *nvbo, bool evict, bool no_wait, + struct nouveau_bo *nvbo, bool evict, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct nouveau_fence *fence = NULL; @@ -468,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, return ret; ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, - evict, no_wait, new_mem); + evict, no_wait_reserve, no_wait_gpu, new_mem); if (nvbo->channel && nvbo->channel != chan) ret = nouveau_fence_wait(fence, NULL, false, false); nouveau_fence_unref((void *)&fence); @@ -492,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, static int nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - int no_wait, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); @@ -570,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, dst_offset += (PAGE_SIZE * line_count); } - return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem); + return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); } static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; struct ttm_placement placement; @@ -588,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); if (ret) return ret; @@ -596,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, 
no_wait_reserve, no_wait_gpu, &tmp_mem); if (ret) goto out; - ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem); + ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); out: if (tmp_mem.mm_node) { spin_lock(&bo->bdev->glob->lru_lock); @@ -613,7 +608,8 @@ out: static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; struct ttm_placement placement; @@ -626,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); if (ret) return ret; - ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem); + ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); + ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); if (ret) goto out; @@ -701,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); @@ -716,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Software copy if the card isn't up and running yet. */ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || !dev_priv->channel) { - ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); goto out; } @@ -730,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Hardware assisted copy. */ if (new_mem->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem); + ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); else if (old_mem->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem); + ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); else - ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); + ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); if (!ret) goto out; /* Fallback to software copy. 
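
The selection logic above stages any system-memory move through a GART-visible (TT) bounce placement, since the copy engine cannot reach plain system pages. In isolation (enum and names are illustrative only):

enum mem_type { MEM_SYSTEM, MEM_TT, MEM_VRAM };

/* flipd: engine-copy into a TT bounce, then flip the pages out to system.
 * flips: flip system pages into TT first, then engine-copy to the target.
 * m2mf:  both ends already GPU-visible, copy directly. */
static const char *
pick_move_path(enum mem_type old_mem, enum mem_type new_mem)
{
	if (new_mem == MEM_SYSTEM)
		return "nouveau_bo_move_flipd";
	if (old_mem == MEM_SYSTEM)
		return "nouveau_bo_move_flips";
	return "nouveau_bo_move_m2mf";
}
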
*/ - ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); out: if (ret) @@ -757,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) return 0; } +static int +nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); + struct drm_device *dev = dev_priv->dev; + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* System memory */ + return 0; + case TTM_PL_TT: +#if __OS_HAS_AGP + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { + mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.base = dev_priv->gart_info.aper_base; + mem->bus.is_iomem = true; + } +#endif + break; + case TTM_PL_VRAM: + mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.base = drm_get_resource_start(dev, 1); + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + return 0; +} + +static void +nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ +} + +static int +nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) +{ + return 0; +} + struct ttm_bo_driver nouveau_bo_driver = { .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, .invalidate_caches = nouveau_bo_invalidate_caches, @@ -769,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = { .sync_obj_flush = nouveau_fence_flush, .sync_obj_unref = nouveau_fence_unref, .sync_obj_ref = nouveau_fence_ref, + .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, + .io_mem_reserve = &nouveau_ttm_io_mem_reserve, + .io_mem_free = &nouveau_ttm_io_mem_free, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 6dfb425cbae9..1fc57ef58295 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -142,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, GFP_KERNEL); if (!dev_priv->fifos[channel]) return -ENOMEM; - dev_priv->fifo_alloc_count++; chan = dev_priv->fifos[channel]; INIT_LIST_HEAD(&chan->nvsw.vbl_wait); INIT_LIST_HEAD(&chan->fence.pending); @@ -321,7 +320,6 @@ nouveau_channel_free(struct nouveau_channel *chan) iounmap(chan->user); dev_priv->fifos[chan->id] = NULL; - dev_priv->fifo_alloc_count--; kfree(chan); } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 24327f468c4b..149ed224c3cb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector) if (nv_encoder && nv_connector->native_mode) { unsigned status = connector_status_connected; -#ifdef CONFIG_ACPI +#if defined(CONFIG_ACPI_BUTTON) || \ + (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) if (!nouveau_ignorelid && !acpi_lid_open()) status = connector_status_unknown; #endif @@ -302,7 +303,7 @@ nouveau_connector_detect(struct drm_connector *connector) detect_analog: nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); - if (!nv_encoder) + if (!nv_encoder && !nouveau_tv_disable) nv_encoder = find_encoder_by_type(connector, 
OUTPUT_TV); if (nv_encoder) { struct drm_encoder *encoder = to_drm_encoder(nv_encoder); @@ -431,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector, } static struct drm_display_mode * -nouveau_connector_native_mode(struct nouveau_connector *connector) +nouveau_connector_native_mode(struct drm_connector *connector) { - struct drm_device *dev = connector->base.dev; + struct drm_connector_helper_funcs *helper = connector->helper_private; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *largest = NULL; int high_w = 0, high_h = 0, high_v = 0; - /* Use preferred mode if there is one.. */ - list_for_each_entry(mode, &connector->base.probed_modes, head) { + list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { + if (helper->mode_valid(connector, mode) != MODE_OK) + continue; + + /* Use preferred mode if there is one.. */ if (mode->type & DRM_MODE_TYPE_PREFERRED) { NV_DEBUG_KMS(dev, "native mode from preferred\n"); return drm_mode_duplicate(dev, mode); } - } - /* Otherwise, take the resolution with the largest width, then height, - * then vertical refresh - */ - list_for_each_entry(mode, &connector->base.probed_modes, head) { + /* Otherwise, take the resolution with the largest width, then + * height, then vertical refresh + */ if (mode->hdisplay < high_w) continue; @@ -552,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) */ if (!nv_connector->native_mode) nv_connector->native_mode = - nouveau_connector_native_mode(nv_connector); + nouveau_connector_native_mode(connector); if (ret == 0 && nv_connector->native_mode) { struct drm_display_mode *mode; @@ -583,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, switch (nv_encoder->dcb->type) { case OUTPUT_LVDS: - BUG_ON(!nv_connector->native_mode); - if (mode->hdisplay > nv_connector->native_mode->hdisplay || - mode->vdisplay > nv_connector->native_mode->vdisplay) + if (nv_connector->native_mode && + (mode->hdisplay > nv_connector->native_mode->hdisplay || + mode->vdisplay > nv_connector->native_mode->vdisplay)) return MODE_PANEL; min_clock = 0; @@ -593,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, break; case OUTPUT_TMDS: if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || - (dev_priv->card_type < NV_50 && - !nv_encoder->dcb->duallink_possible)) + !nv_encoder->dcb->duallink_possible) max_clock = 165000; else max_clock = 330000; @@ -728,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, if (ret == 0) goto out; nv_connector->detected_encoder = nv_encoder; - nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); + nv_connector->native_mode = nouveau_connector_native_mode(connector); list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) drm_mode_remove(connector, mode); @@ -843,6 +846,7 @@ nouveau_connector_create(struct drm_device *dev, switch (dcb->type) { case DCB_CONNECTOR_VGA: + connector->polled = DRM_CONNECTOR_POLL_CONNECT; if (dev_priv->card_type >= NV_50) { drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, @@ -854,6 +858,17 @@ nouveau_connector_create(struct drm_device *dev, case DCB_CONNECTOR_TV_3: nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; break; + case DCB_CONNECTOR_DP: + case DCB_CONNECTOR_eDP: + case DCB_CONNECTOR_HDMI_0: + case DCB_CONNECTOR_HDMI_1: + case DCB_CONNECTOR_DVI_I: + case DCB_CONNECTOR_DVI_D: + if (dev_priv->card_type >= NV_50) + 
connector->polled = DRM_CONNECTOR_POLL_HPD; + else + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + /* fall-through */ default: nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h index 49fa7b2d257e..cb1ce2a09162 100644 --- a/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h @@ -40,6 +40,8 @@ struct nouveau_crtc { int sharpness; int last_dpms; + int cursor_saved_x, cursor_saved_y; + struct { int cpp; bool blanked; diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 8ff9ef5d4b47..7933de4aff2e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -33,6 +33,8 @@ #include "drmP.h" #include "nouveau_drv.h" +#include <ttm/ttm_page_alloc.h> + static int nouveau_debugfs_channel_info(struct seq_file *m, void *data) { @@ -137,10 +139,9 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_minor *minor = node->minor; - struct drm_device *dev = minor->dev; + struct drm_nouveau_private *dev_priv = minor->dev->dev_private; - seq_printf(m, "VRAM total: %dKiB\n", - (int)(nouveau_mem_fb_amount(dev) >> 10)); + seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10)); return 0; } @@ -160,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = { { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, { "memory", nouveau_debugfs_memory_info, 0, NULL }, { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, + { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL }, }; #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cf1c5c0a0abe..74e6b4ed12c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -34,10 +34,6 @@ static void nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) { struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); - struct drm_device *dev = drm_fb->dev; - - if (drm_fb->fbdev) - nouveau_fbcon_remove(dev, drm_fb); if (fb->nvbo) drm_gem_object_unreference_unlocked(fb->nvbo->gem); @@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { .create_handle = nouveau_user_framebuffer_create_handle, }; -struct drm_framebuffer * -nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo, - struct drm_mode_fb_cmd *mode_cmd) +int +nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb, + struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo) { - struct nouveau_framebuffer *fb; int ret; - fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); - if (!fb) - return NULL; - - ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs); + ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs); if (ret) { - kfree(fb); - return NULL; + return ret; } - drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); - - fb->nvbo = nvbo; - return &fb->base; + drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd); + nouveau_fb->nvbo = nvbo; + return 0; } static struct drm_framebuffer * @@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd) { - struct drm_framebuffer *fb; + struct 
nouveau_framebuffer *nouveau_fb; struct drm_gem_object *gem; + int ret; gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); if (!gem) return NULL; - fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd); - if (!fb) { + nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); + if (!nouveau_fb) + return NULL; + + ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); + if (ret) { drm_gem_object_unreference(gem); return NULL; } - return fb; + return &nouveau_fb->base; } const struct drm_mode_config_funcs nouveau_mode_config_funcs = { .fb_create = nouveau_user_framebuffer_create, - .fb_changed = nouveau_fbcon_probe, + .output_poll_changed = nouveau_fbcon_output_poll_changed, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index c8482a108a78..65c441a1999f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; + + DRM_MEMORYBARRIER(); + /* Flush writes. */ + nouveau_bo_rd32(pb, 0); + nvchan_wr32(chan, 0x8c, chan->dma.ib_put); chan->dma.ib_free--; } diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index f954ad93e81f..deeb21c6865c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -483,7 +483,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); - for (;;) { + for (i = 0; i < 16; i++) { nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); @@ -502,6 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, break; } + if (i == 16) { + NV_ERROR(dev, "auxch DEFER too many times, bailing\n"); + ret = -EREMOTEIO; + goto out; + } + if (cmd & 1) { if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { ret = -EREMOTEIO; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 30cc09e8a709..273770432298 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); int nouveau_nofbaccel = 0; module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); +MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); +int nouveau_override_conntype = 0; +module_param_named(override_conntype, nouveau_override_conntype, int, 0400); + +MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n"); +int nouveau_tv_disable = 0; +module_param_named(tv_disable, nouveau_tv_disable, int, 0400); + MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" @@ -145,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_channel *chan; struct drm_crtc *crtc; - uint32_t fbdev_flags; int ret, i; if (!drm_core_check_feature(dev, DRIVER_MODESET)) @@ -154,9 +161,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) if (pm_state.event == PM_EVENT_PRETHAW) return 0; - fbdev_flags = 
dev_priv->fbdev_info->flags; - dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; + NV_INFO(dev, "Disabling fbcon acceleration...\n"); + nouveau_fbcon_save_disable_accel(dev); + NV_INFO(dev, "Unpinning framebuffer(s)...\n"); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -167,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) nouveau_bo_unpin(nouveau_fb->nvbo); } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + nouveau_bo_unpin(nv_crtc->cursor.nvbo); + } + NV_INFO(dev, "Evicting buffers...\n"); ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); @@ -220,9 +235,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) } acquire_console_sem(); - fb_set_suspend(dev_priv->fbdev_info, 1); + nouveau_fbcon_set_suspend(dev, 1); release_console_sem(); - dev_priv->fbdev_info->flags = fbdev_flags; + nouveau_fbcon_restore_accel(dev); return 0; out_abort: @@ -240,14 +255,12 @@ nouveau_pci_resume(struct pci_dev *pdev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; struct drm_crtc *crtc; - uint32_t fbdev_flags; int ret, i; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; - fbdev_flags = dev_priv->fbdev_info->flags; - dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; + nouveau_fbcon_save_disable_accel(dev); NV_INFO(dev, "We're back, enabling device...\n"); pci_set_power_state(pdev, PCI_D0); @@ -308,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev) nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ret; + + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + NV_ERROR(dev, "Could not pin/map cursor.\n"); + } + if (dev_priv->card_type < NV_50) { nv04_display_restore(dev); NVLockVgaCrtcs(dev, false); } else nv50_display_init(dev); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->cursor.set_offset(nv_crtc, + nv_crtc->cursor.nvbo->bo.offset - + dev_priv->vm_vram_base); + + nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, + nv_crtc->cursor_saved_y); + } + /* Force CLUT to get re-loaded during modeset */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); @@ -322,13 +357,14 @@ nouveau_pci_resume(struct pci_dev *pdev) } acquire_console_sem(); - fb_set_suspend(dev_priv->fbdev_info, 0); + nouveau_fbcon_set_suspend(dev, 0); release_console_sem(); - nouveau_fbcon_zfill(dev); + nouveau_fbcon_zfill_all(dev); drm_helper_resume_force_mode(dev); - dev_priv->fbdev_info->flags = fbdev_flags; + + nouveau_fbcon_restore_accel(dev); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 4b9aaf2a8d0f..c69719106489 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -76,6 +76,7 @@ struct nouveau_bo { struct ttm_buffer_object bo; struct ttm_placement placement; u32 placements[3]; + u32 busy_placements[3]; struct ttm_bo_kmap_obj kmap; struct list_head head; @@ -519,6 +520,7 @@ struct drm_nouveau_private { struct workqueue_struct *wq; struct work_struct irq_work; + struct 
work_struct hpd_work; struct list_head vbl_waiting; @@ -553,12 +555,6 @@ struct drm_nouveau_private { uint32_t ramro_offset; uint32_t ramro_size; - /* base physical addresses */ - uint64_t fb_phys; - uint64_t fb_available_size; - uint64_t fb_mappable_pages; - uint64_t fb_aper_free; - struct { enum { NOUVEAU_GART_NONE = 0, @@ -572,10 +568,6 @@ struct drm_nouveau_private { struct nouveau_gpuobj *sg_ctxdma; struct page *sg_dummy_page; dma_addr_t sg_dummy_bus; - - /* nottm hack */ - struct drm_ttm_backend *sg_be; - unsigned long sg_handle; } gart_info; /* nv10-nv40 tiling regions */ @@ -584,6 +576,16 @@ struct drm_nouveau_private { spinlock_t lock; } tile; + /* VRAM/fb configuration */ + uint64_t vram_size; + uint64_t vram_sys_base; + + uint64_t fb_phys; + uint64_t fb_available_size; + uint64_t fb_mappable_pages; + uint64_t fb_aper_free; + int fb_mtrr; + /* G8x/G9x virtual address space */ uint64_t vm_gart_base; uint64_t vm_gart_size; @@ -592,10 +594,6 @@ struct drm_nouveau_private { uint64_t vm_end; struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; int vm_vram_pt_nr; - uint64_t vram_sys_base; - - /* the mtrr covering the FB */ - int fb_mtrr; struct mem_block *ramin_heap; @@ -614,11 +612,7 @@ struct drm_nouveau_private { uint32_t dac_users[4]; struct nouveau_suspend_resume { - uint32_t fifo_mode; - uint32_t graph_ctx_control; - uint32_t graph_state; uint32_t *ramin_copy; - uint64_t ramin_size; } susres; struct backlight_device *backlight; @@ -628,6 +622,9 @@ struct drm_nouveau_private { struct { struct dentry *channel_root; } debugfs; + + struct nouveau_fbdev *nfbdev; + struct apertures_struct *apertures; }; static inline struct drm_nouveau_private * @@ -681,6 +678,7 @@ extern int nouveau_uscript_tmds; extern int nouveau_vram_pushbuf; extern int nouveau_vram_notify; extern int nouveau_fbpercrtc; +extern int nouveau_tv_disable; extern char *nouveau_tv_norm; extern int nouveau_reg_debug; extern char *nouveau_vbios; @@ -688,6 +686,7 @@ extern int nouveau_ctxfw; extern int nouveau_ignorelid; extern int nouveau_nofbaccel; extern int nouveau_noaccel; +extern int nouveau_override_conntype; extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); extern int nouveau_pci_resume(struct pci_dev *pdev); @@ -715,7 +714,7 @@ extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, struct drm_file *, int tail); extern void nouveau_mem_takedown(struct mem_block **heap); extern void nouveau_mem_free_block(struct mem_block *); -extern uint64_t nouveau_mem_fb_amount(struct drm_device *); +extern int nouveau_mem_detect(struct drm_device *dev); extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); extern int nouveau_mem_init(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); @@ -852,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *); extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); /* nouveau_acpi.c */ +#define ROM_BIOS_PAGE 4096 #if defined(CONFIG_ACPI) void nouveau_register_dsm_handler(void); void nouveau_unregister_dsm_handler(void); +int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); +bool nouveau_acpi_rom_supported(struct pci_dev *pdev); #else static inline void nouveau_register_dsm_handler(void) {} static inline void nouveau_unregister_dsm_handler(void) {} +static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } +static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } #endif /* 
nouveau_backlight.c */ @@ -926,6 +930,10 @@ extern void nv40_fb_takedown(struct drm_device *); extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, uint32_t, uint32_t); +/* nv50_fb.c */ +extern int nv50_fb_init(struct drm_device *); +extern void nv50_fb_takedown(struct drm_device *); + /* nv04_fifo.c */ extern int nv04_fifo_init(struct drm_device *); extern void nv04_fifo_disable(struct drm_device *); @@ -1118,7 +1126,8 @@ extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); extern int nouveau_bo_unpin(struct nouveau_bo *); extern int nouveau_bo_map(struct nouveau_bo *); extern void nouveau_bo_unmap(struct nouveau_bo *); -extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); +extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type, + uint32_t busy); extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); @@ -1162,6 +1171,16 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *, int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); +/* nv50_gpio.c */ +int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); +int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); + +/* nv50_calc.c */ +int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, + int *N1, int *M1, int *N2, int *M2, int *P); +int nv50_calc_pll2(struct drm_device *, struct pll_lims *, + int clk, int *N, int *fN, int *M, int *P); + #ifndef ioread32_native #ifdef __BIG_ENDIAN #define ioread16_native ioread16be diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index bc4a24029ed1..e1df8209cd0f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -47,6 +47,9 @@ struct nouveau_encoder { union { struct { + int mc_unknown; + uint32_t unk0; + uint32_t unk1; int dpcd_version; int link_nr; int link_bw; diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h index 4a3f31aa1949..d432134b71e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fb.h +++ b/drivers/gpu/drm/nouveau/nouveau_fb.h @@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb) extern const struct drm_mode_config_funcs nouveau_mode_config_funcs; -struct drm_framebuffer * -nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *, - struct drm_mode_fb_cmd *); - +int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb, + struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo); #endif /* __NOUVEAU_FB_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 68cedd9194fe..257ea130ae13 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -30,7 +30,6 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> -#include <linux/slab.h> #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/fb.h> @@ -53,8 +52,8 @@ static int nouveau_fbcon_sync(struct fb_info *info) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = 
dev_priv->channel; int ret, i; @@ -98,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, @@ -112,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = nv04_fbcon_fillrect, .fb_copyarea = nv04_fbcon_copyarea, .fb_imageblit = nv04_fbcon_imageblit, @@ -126,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = nv50_fbcon_fillrect, .fb_copyarea = nv50_fbcon_copyarea, .fb_imageblit = nv50_fbcon_imageblit, @@ -156,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = nv_crtc->lut.b[regno]; } -static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { - .gamma_set = nouveau_fbcon_gamma_set, - .gamma_get = nouveau_fbcon_gamma_get -}; - -#if defined(__i386__) || defined(__x86_64__) -static bool -nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev) +static void +nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) { - struct pci_dev *pdev = dev->pdev; - int ramin; - - if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB && - screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) - return false; - - if (screen_info.lfb_base < pci_resource_start(pdev, 1)) - goto not_fb; - - if (screen_info.lfb_base + screen_info.lfb_size >= - pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1)) - goto not_fb; - - return true; -not_fb: - ramin = 2; - if (pci_resource_len(pdev, ramin) == 0) { - ramin = 3; - if (pci_resource_len(pdev, ramin) == 0) - return false; - } - - if (screen_info.lfb_base < pci_resource_start(pdev, ramin)) - return false; - - if (screen_info.lfb_base + screen_info.lfb_size >= - pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin)) - return false; - - return true; -} -#endif - -void -nouveau_fbcon_zfill(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct fb_info *info = dev_priv->fbdev_info; + struct fb_info *info = nfbdev->helper.fbdev; struct fb_fillrect rect; /* Clear the entire fbcon. 
The drm will program every connector @@ -219,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev) } static int -nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, - uint32_t fb_height, uint32_t surface_width, - uint32_t surface_height, uint32_t surface_depth, - uint32_t surface_bpp, struct drm_framebuffer **pfb) +nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, + struct drm_fb_helper_surface_size *sizes) { + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct fb_info *info; - struct nouveau_fbcon_par *par; struct drm_framebuffer *fb; struct nouveau_framebuffer *nouveau_fb; struct nouveau_bo *nvbo; struct drm_mode_fb_cmd mode_cmd; - struct device *device = &dev->pdev->dev; + struct pci_dev *pdev = dev->pdev; + struct device *device = &pdev->dev; int size, ret; - mode_cmd.width = surface_width; - mode_cmd.height = surface_height; + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; - mode_cmd.bpp = surface_bpp; + mode_cmd.bpp = sizes->surface_bpp; mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); mode_cmd.pitch = roundup(mode_cmd.pitch, 256); - mode_cmd.depth = surface_depth; + mode_cmd.depth = sizes->surface_depth; size = mode_cmd.pitch * mode_cmd.height; size = roundup(size, PAGE_SIZE); @@ -269,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, mutex_lock(&dev->struct_mutex); - fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd); - if (!fb) { + info = framebuffer_alloc(0, device); + if (!info) { ret = -ENOMEM; - NV_ERROR(dev, "failed to allocate fb.\n"); goto out_unref; } - list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); - - nouveau_fb = nouveau_framebuffer(fb); - *pfb = fb; - - info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device); - if (!info) { + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { ret = -ENOMEM; goto out_unref; } - par = info->par; - par->helper.funcs = &nouveau_fbcon_helper_funcs; - par->helper.dev = dev; - ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4); - if (ret) - goto out_unref; - dev_priv->fbdev_info = info; + info->par = nfbdev; + + nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo); + + nouveau_fb = &nfbdev->nouveau_fb; + fb = &nouveau_fb->base; + + /* setup helper */ + nfbdev->helper.fb = fb; + nfbdev->helper.fbdev = info; strcpy(info->fix.id, "nouveaufb"); if (nouveau_nofbaccel) @@ -311,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, info->screen_size = size; drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); - drm_fb_helper_fill_var(info, fb, fb_width, fb_height); + drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); /* FIXME: we really shouldn't expose mmio space at all */ - info->fix.mmio_start = pci_resource_start(dev->pdev, 1); - info->fix.mmio_len = pci_resource_len(dev->pdev, 1); + info->fix.mmio_start = pci_resource_start(pdev, 1); + info->fix.mmio_len = pci_resource_len(pdev, 1); /* Set aperture base/size for vesafb takeover */ -#if defined(__i386__) || defined(__x86_64__) - if (nouveau_fbcon_has_vesafb_or_efifb(dev)) { - /* Some NVIDIA VBIOS' are stupid and decide to put the - * framebuffer in the middle of the PRAMIN BAR for - * whatever reason. We need to know the exact lfb_base - * to get vesafb kicked off, and the only reliable way - * we have left is to find out lfb_base the same way - * vesafb did. 
- */ - info->aperture_base = screen_info.lfb_base; - info->aperture_size = screen_info.lfb_size; - if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) - info->aperture_size *= 65536; - } else -#endif - { - info->aperture_base = info->fix.mmio_start; - info->aperture_size = info->fix.mmio_len; + info->apertures = dev_priv->apertures; + if (!info->apertures) { + ret = -ENOMEM; + goto out_unref; } info->pixmap.size = 64*1024; @@ -344,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; - fb->fbdev = info; - - par->nouveau_fb = nouveau_fb; - par->dev = dev; - if (dev_priv->channel && !nouveau_nofbaccel) { switch (dev_priv->card_type) { case NV_50: @@ -362,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, }; } - nouveau_fbcon_zfill(dev); + nouveau_fbcon_zfill(dev, nfbdev); /* To allow resizing without swapping buffers */ NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", @@ -380,44 +309,130 @@ out: return ret; } -int -nouveau_fbcon_probe(struct drm_device *dev) +static int +nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { - NV_DEBUG_KMS(dev, "\n"); + struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper; + int new_fb = 0; + int ret; + + if (!helper->fb) { + ret = nouveau_fbcon_create(nfbdev, sizes); + if (ret) + return ret; + new_fb = 1; + } + return new_fb; +} - return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); +void +nouveau_fbcon_output_poll_changed(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); } int -nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) +nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) { - struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb); + struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; struct fb_info *info; - if (!fb) - return -EINVAL; - - info = fb->fbdev; - if (info) { - struct nouveau_fbcon_par *par = info->par; - + if (nfbdev->helper.fbdev) { + info = nfbdev->helper.fbdev; unregister_framebuffer(info); + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + + if (nouveau_fb->nvbo) { nouveau_bo_unmap(nouveau_fb->nvbo); drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); nouveau_fb->nvbo = NULL; - if (par) - drm_fb_helper_free(&par->helper); - framebuffer_release(info); } - + drm_fb_helper_fini(&nfbdev->helper); + drm_framebuffer_cleanup(&nouveau_fb->base); return 0; } void nouveau_fbcon_gpu_lockup(struct fb_info *info) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); info->flags |= FBINFO_HWACCEL_DISABLED; } + +static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { + .gamma_set = nouveau_fbcon_gamma_set, + .gamma_get = nouveau_fbcon_gamma_get, + .fb_probe = nouveau_fbcon_find_or_create_single, +}; + + +int nouveau_fbcon_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fbdev *nfbdev; + int ret; + + nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); + if (!nfbdev) + return -ENOMEM; + + nfbdev->dev = dev; + dev_priv->nfbdev = nfbdev; + nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; + + ret = 
drm_fb_helper_init(dev, &nfbdev->helper, + nv_two_heads(dev) ? 2 : 1, 4); + if (ret) { + kfree(nfbdev); + return ret; + } + + drm_fb_helper_single_add_all_connectors(&nfbdev->helper); + drm_fb_helper_initial_config(&nfbdev->helper, 32); + return 0; +} + +void nouveau_fbcon_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (!dev_priv->nfbdev) + return; + + nouveau_fbcon_destroy(dev, dev_priv->nfbdev); + kfree(dev_priv->nfbdev); + dev_priv->nfbdev = NULL; +} + +void nouveau_fbcon_save_disable_accel(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags; + dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; +} + +void nouveau_fbcon_restore_accel(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags; +} + +void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); +} + +void nouveau_fbcon_zfill_all(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + nouveau_fbcon_zfill(dev, dev_priv->nfbdev); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index f9c34e1a8c11..e7e12684c37e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -29,16 +29,16 @@ #include "drm_fb_helper.h" -struct nouveau_fbcon_par { +#include "nouveau_fb.h" +struct nouveau_fbdev { struct drm_fb_helper helper; + struct nouveau_framebuffer nouveau_fb; + struct list_head fbdev_list; struct drm_device *dev; - struct nouveau_framebuffer *nouveau_fb; + unsigned int saved_flags; }; -int nouveau_fbcon_probe(struct drm_device *dev); -int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); void nouveau_fbcon_restore(void); -void nouveau_fbcon_zfill(struct drm_device *dev); void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); @@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv50_fbcon_accel_init(struct fb_info *info); void nouveau_fbcon_gpu_lockup(struct fb_info *info); + +int nouveau_fbcon_init(struct drm_device *dev); +void nouveau_fbcon_fini(struct drm_device *dev); +void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); +void nouveau_fbcon_zfill_all(struct drm_device *dev); +void nouveau_fbcon_save_disable_accel(struct drm_device *dev); +void nouveau_fbcon_restore_accel(struct drm_device *dev); + +void nouveau_fbcon_output_poll_changed(struct drm_device *dev); #endif /* __NV50_FBCON_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0d22f66f1c79..69c76cf93407 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem) } ttm_bo_unref(&bo); + + drm_gem_object_release(gem); + kfree(gem); } int @@ -180,40 +183,35 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, { struct nouveau_bo *nvbo = gem->driver_private; struct ttm_buffer_object *bo = &nvbo->bo; - uint64_t flags; + uint32_t domains = valid_domains & + (write_domains ? 
write_domains : read_domains); + uint32_t pref_flags = 0, valid_flags = 0; - if (!valid_domains || (!read_domains && !write_domains)) + if (!domains) return -EINVAL; - if (write_domains) { - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - (write_domains & NOUVEAU_GEM_DOMAIN_VRAM)) - flags = TTM_PL_FLAG_VRAM; - else - if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && - (write_domains & NOUVEAU_GEM_DOMAIN_GART)) - flags = TTM_PL_FLAG_TT; - else - return -EINVAL; - } else { - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - bo->mem.mem_type == TTM_PL_VRAM) - flags = TTM_PL_FLAG_VRAM; - else - if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && - (read_domains & NOUVEAU_GEM_DOMAIN_GART) && - bo->mem.mem_type == TTM_PL_TT) - flags = TTM_PL_FLAG_TT; - else - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - (read_domains & NOUVEAU_GEM_DOMAIN_VRAM)) - flags = TTM_PL_FLAG_VRAM; - else - flags = TTM_PL_FLAG_TT; - } + if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) + valid_flags |= TTM_PL_FLAG_VRAM; + + if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) + valid_flags |= TTM_PL_FLAG_TT; + + if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && + bo->mem.mem_type == TTM_PL_VRAM) + pref_flags |= TTM_PL_FLAG_VRAM; + + else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && + bo->mem.mem_type == TTM_PL_TT) + pref_flags |= TTM_PL_FLAG_TT; + + else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) + pref_flags |= TTM_PL_FLAG_VRAM; + + else + pref_flags |= TTM_PL_FLAG_TT; + + nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); - nouveau_bo_placement_set(nvbo, flags); return 0; } @@ -387,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, nvbo->channel = chan; ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, - false, false); + false, false, false); nvbo->channel = NULL; if (unlikely(ret)) { NV_ERROR(dev, "fail ttm_validate\n"); diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c index c7ebec696747..f731c5f60536 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c @@ -23,6 +23,7 @@ */ #include <linux/firmware.h> +#include <linux/slab.h> #include "drmP.h" #include "nouveau_drv.h" @@ -67,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev) return ret; } - pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); + pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL); if (!pgraph->ctxprog) { NV_ERROR(dev, "OOM copying ctxprog\n"); release_firmware(fw); return -ENOMEM; } - memcpy(pgraph->ctxprog, fw->data, fw->size); cp = pgraph->ctxprog; if (le32_to_cpu(cp->signature) != 0x5043564e || @@ -96,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev) return ret; } - pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); + pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL); if (!pgraph->ctxvals) { NV_ERROR(dev, "OOM copying ctxvals\n"); release_firmware(fw); nouveau_grctx_fini(dev); return -ENOMEM; } - memcpy(pgraph->ctxvals, fw->data, fw->size); cv = (void *)pgraph->ctxvals; if (le32_to_cpu(cv->signature) != 0x5643564e || diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 88583e7bf651..316a3c7e6eb4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -254,16 +254,27 @@ struct nouveau_i2c_chan * nouveau_i2c_find(struct drm_device *dev, int index) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->vbios; + struct dcb_i2c_entry *i2c = 
&dev_priv->vbios.dcb.i2c[index]; if (index >= DCB_MAX_NUM_I2C_ENTRIES) return NULL; - if (!bios->dcb.i2c[index].chan) { - if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index)) - return NULL; + if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { + uint32_t reg = 0xe500, val; + + if (i2c->port_type == 6) { + reg += i2c->read * 0x50; + val = 0x2002; + } else { + reg += ((i2c->entry & 0x1e00) >> 9) * 0x50; + val = 0xe001; + } + + nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); } - return bios->dcb.i2c[index].chan; + if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) + return NULL; + return i2c->chan; } diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 95220ddebb45..53360f156063 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -51,6 +51,7 @@ nouveau_irq_preinstall(struct drm_device *dev) if (dev_priv->card_type == NV_50) { INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); + INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); INIT_LIST_HEAD(&dev_priv->vbl_waiting); } } @@ -311,6 +312,31 @@ nouveau_print_bitfield_names_(uint32_t value, #define nouveau_print_bitfield_names(val, namelist) \ nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) +struct nouveau_enum_names { + uint32_t value; + const char *name; +}; + +static void +nouveau_print_enum_names_(uint32_t value, + const struct nouveau_enum_names *namelist, + const int namelist_len) +{ + /* + * Caller must have already printed the KERN_* log level for us. + * Also the caller is responsible for adding the newline. + */ + int i; + for (i = 0; i < namelist_len; ++i) { + if (value == namelist[i].value) { + printk("%s", namelist[i].name); + return; + } + } + printk("unknown value 0x%08x", value); +} +#define nouveau_print_enum_names(val, namelist) \ + nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) static int nouveau_graph_chid_from_grctx(struct drm_device *dev) @@ -427,14 +453,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t nsource = trap->nsource, nstatus = trap->nstatus; - NV_INFO(dev, "%s - nSource:", id); - nouveau_print_bitfield_names(nsource, nsource_names); - printk(", nStatus:"); - if (dev_priv->card_type < NV_10) - nouveau_print_bitfield_names(nstatus, nstatus_names); - else - nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); - printk("\n"); + if (dev_priv->card_type < NV_50) { + NV_INFO(dev, "%s - nSource:", id); + nouveau_print_bitfield_names(nsource, nsource_names); + printk(", nStatus:"); + if (dev_priv->card_type < NV_10) + nouveau_print_bitfield_names(nstatus, nstatus_names); + else + nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); + printk("\n"); + } NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " "Data 0x%08x:0x%08x\n", @@ -578,27 +606,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) } static void +nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t trap[6]; + int i, ch; + uint32_t idx = nv_rd32(dev, 0x100c90); + if (idx & 0x80000000) { + idx &= 0xffffff; + if (display) { + for (i = 0; i < 6; i++) { + nv_wr32(dev, 0x100c90, idx | i << 24); + trap[i] = nv_rd32(dev, 0x100c94); + } + for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { + struct nouveau_channel *chan = dev_priv->fifos[ch]; + + if (!chan || !chan->ramin) + continue; + + 
if (trap[1] == chan->ramin->instance >> 12) + break; + } + NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", + name, (trap[5]&0x100?"read":"write"), + trap[5]&0xff, trap[4]&0xffff, + trap[3]&0xffff, trap[0], trap[2], ch); + } + nv_wr32(dev, 0x100c90, idx | 0x80000000); + } else if (display) { + NV_INFO(dev, "%s - no VM fault?\n", name); + } +} + +static struct nouveau_enum_names nv50_mp_exec_error_names[] = +{ + { 3, "STACK_UNDERFLOW" }, + { 4, "QUADON_ACTIVE" }, + { 8, "TIMEOUT" }, + { 0x10, "INVALID_OPCODE" }, + { 0x40, "BREAKPOINT" }, +}; + +static void +nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t units = nv_rd32(dev, 0x1540); + uint32_t addr, mp10, status, pc, oplow, ophigh; + int i; + int mps = 0; + for (i = 0; i < 4; i++) { + if (!(units & 1 << (i+24))) + continue; + if (dev_priv->chipset < 0xa0) + addr = 0x408200 + (tpid << 12) + (i << 7); + else + addr = 0x408100 + (tpid << 11) + (i << 7); + mp10 = nv_rd32(dev, addr + 0x10); + status = nv_rd32(dev, addr + 0x14); + if (!status) + continue; + if (display) { + nv_rd32(dev, addr + 0x20); + pc = nv_rd32(dev, addr + 0x24); + oplow = nv_rd32(dev, addr + 0x70); + ophigh= nv_rd32(dev, addr + 0x74); + NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " + "TP %d MP %d: ", tpid, i); + nouveau_print_enum_names(status, + nv50_mp_exec_error_names); + printk(" at %06x warp %d, opcode %08x %08x\n", + pc&0xffffff, pc >> 24, + oplow, ophigh); + } + nv_wr32(dev, addr + 0x10, mp10); + nv_wr32(dev, addr + 0x14, 0); + mps++; + } + if (!mps && display) + NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " + "No MPs claiming errors?\n", tpid); +} + +static void +nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, + uint32_t ustatus_new, int display, const char *name) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int tps = 0; + uint32_t units = nv_rd32(dev, 0x1540); + int i, r; + uint32_t ustatus_addr, ustatus; + for (i = 0; i < 16; i++) { + if (!(units & (1 << i))) + continue; + if (dev_priv->chipset < 0xa0) + ustatus_addr = ustatus_old + (i << 12); + else + ustatus_addr = ustatus_new + (i << 11); + ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; + if (!ustatus) + continue; + tps++; + switch (type) { + case 6: /* texture error... 
unknown for now */ + nv50_pfb_vm_trap(dev, display, name); + if (display) { + NV_ERROR(dev, "magic set %d:\n", i); + for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + } + break; + case 7: /* MP error */ + if (ustatus & 0x00010000) { + nv50_pgraph_mp_trap(dev, i, display); + ustatus &= ~0x00010000; + } + break; + case 8: /* TPDMA error */ + { + uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); + uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); + uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); + uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); + uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); + uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); + uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); + nv50_pfb_vm_trap(dev, display, name); + /* 2d engine destination */ + if (ustatus & 0x00000010) { + if (display) { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000010; + } + /* Render target */ + if (ustatus & 0x00000040) { + if (display) { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000040; + } + /* CUDA memory: l[], g[] or stack. */ + if (ustatus & 0x00000080) { + if (display) { + if (e18 & 0x80000000) { + /* g[] read fault? */ + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 24) & 0x1f)); + e18 &= ~0x1f000000; + } else if (e18 & 0xc) { + /* g[] write fault? */ + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 7) & 0x1f)); + e18 &= ~0x00000f80; + } else { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", + i, e14, e10); + } + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000080; + } + } + break; + } + if (ustatus) { + if (display) + NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); + } + nv_wr32(dev, ustatus_addr, 0xc0000000); + } + + if (!tps && display) + NV_INFO(dev, "%s - No TPs claiming errors?\n", name); +} + +static void +nv50_pgraph_trap_handler(struct drm_device *dev) +{ + struct nouveau_pgraph_trap trap; + uint32_t status = nv_rd32(dev, 0x400108); + uint32_t ustatus; + int display = nouveau_ratelimit(); + + + if (!status && display) { + nouveau_graph_trap_info(dev, &trap); + nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); + NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); + } + + /* DISPATCH: Relays commands to other units and handles NOTIFY, + * COND, QUERY. If you get a trap from it, the command is still stuck + * in DISPATCH and you need to do something about it. */ + if (status & 0x001) { + ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; + if (!ustatus && display) { + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); + } + + /* Known to be triggered by screwed up NOTIFY and COND... 
*/ + if (ustatus & 0x00000001) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); + nv_wr32(dev, 0x400500, 0); + if (nv_rd32(dev, 0x400808) & 0x80000000) { + if (display) { + if (nouveau_graph_trapped_channel(dev, &trap.channel)) + trap.channel = -1; + trap.class = nv_rd32(dev, 0x400814); + trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; + trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; + trap.data = nv_rd32(dev, 0x40080c); + trap.data2 = nv_rd32(dev, 0x400810); + nouveau_graph_dump_trap_info(dev, + "PGRAPH_TRAP_DISPATCH_FAULT", &trap); + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); + } + nv_wr32(dev, 0x400808, 0); + } else if (display) { + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); + } + nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); + nv_wr32(dev, 0x400848, 0); + ustatus &= ~0x00000001; + } + if (ustatus & 0x00000002) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); + nv_wr32(dev, 0x400500, 0); + if (nv_rd32(dev, 0x40084c) & 0x80000000) { + if (display) { + if (nouveau_graph_trapped_channel(dev, &trap.channel)) + trap.channel = -1; + trap.class = nv_rd32(dev, 0x400814); + trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; + trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; + trap.data = nv_rd32(dev, 0x40085c); + trap.data2 = 0; + nouveau_graph_dump_trap_info(dev, + "PGRAPH_TRAP_DISPATCH_QUERY", &trap); + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); + } + nv_wr32(dev, 0x40084c, 0); + } else if (display) { + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); + } + ustatus &= ~0x00000002; + } + if (ustatus && display) + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus); + nv_wr32(dev, 0x400804, 0xc0000000); + nv_wr32(dev, 0x400108, 0x001); + status &= ~0x001; + } + + /* TRAPs other than dispatch use the "normal" trap regs. */ + if (status && display) { + nouveau_graph_trap_info(dev, &trap); + nouveau_graph_dump_trap_info(dev, + "PGRAPH_TRAP", &trap); + } + + /* M2MF: Memory to memory copy engine. */ + if (status & 0x002) { + ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; + if (!ustatus && display) { + NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); + } + if (ustatus & 0x00000001) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); + ustatus &= ~0x00000001; + } + if (ustatus & 0x00000002) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); + ustatus &= ~0x00000002; + } + if (ustatus & 0x00000004) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); + ustatus &= ~0x00000004; + } + NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", + nv_rd32(dev, 0x406804), + nv_rd32(dev, 0x406808), + nv_rd32(dev, 0x40680c), + nv_rd32(dev, 0x406810)); + if (ustatus && display) + NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(dev, 0x400040, 2); + nv_wr32(dev, 0x400040, 0); + nv_wr32(dev, 0x406800, 0xc0000000); + nv_wr32(dev, 0x400108, 0x002); + status &= ~0x002; + } + + /* VFETCH: Fetches data from vertex buffers. 
*/ + if (status & 0x004) { + ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; + if (!ustatus && display) { + NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); + } + if (ustatus & 0x00000001) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); + NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", + nv_rd32(dev, 0x400c00), + nv_rd32(dev, 0x400c08), + nv_rd32(dev, 0x400c0c), + nv_rd32(dev, 0x400c10)); + ustatus &= ~0x00000001; + } + if (ustatus && display) + NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); + nv_wr32(dev, 0x400c04, 0xc0000000); + nv_wr32(dev, 0x400108, 0x004); + status &= ~0x004; + } + + /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ + if (status & 0x008) { + ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; + if (!ustatus && display) { + NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); + } + if (ustatus & 0x00000001) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); + NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", + nv_rd32(dev, 0x401804), + nv_rd32(dev, 0x401808), + nv_rd32(dev, 0x40180c), + nv_rd32(dev, 0x401810)); + ustatus &= ~0x00000001; + } + if (ustatus && display) + NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(dev, 0x400040, 0x80); + nv_wr32(dev, 0x400040, 0); + nv_wr32(dev, 0x401800, 0xc0000000); + nv_wr32(dev, 0x400108, 0x008); + status &= ~0x008; + } + + /* CCACHE: Handles code and c[] caches and fills them. */ + if (status & 0x010) { + ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; + if (!ustatus && display) { + NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); + } + if (ustatus & 0x00000001) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); + NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", + nv_rd32(dev, 0x405800), + nv_rd32(dev, 0x405804), + nv_rd32(dev, 0x405808), + nv_rd32(dev, 0x40580c), + nv_rd32(dev, 0x405810), + nv_rd32(dev, 0x405814), + nv_rd32(dev, 0x40581c)); + ustatus &= ~0x00000001; + } + if (ustatus && display) + NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); + nv_wr32(dev, 0x405018, 0xc0000000); + nv_wr32(dev, 0x400108, 0x010); + status &= ~0x010; + } + + /* Unknown, not seen yet... 0x402000 is the only trap status reg + * remaining, so try to handle it anyway. Perhaps related to that + * unknown DMA slot on tesla? */ + if (status & 0x20) { + nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); + ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; + if (display) + NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); + nv_wr32(dev, 0x402000, 0xc0000000); + /* no status modification on purpose */ + } + + /* TEXTURE: CUDA texturing units */ + if (status & 0x040) { + nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, + "PGRAPH_TRAP_TEXTURE"); + nv_wr32(dev, 0x400108, 0x040); + status &= ~0x040; + } + + /* MP: CUDA execution engines. */ + if (status & 0x080) { + nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, + "PGRAPH_TRAP_MP"); + nv_wr32(dev, 0x400108, 0x080); + status &= ~0x080; + } + + /* TPDMA: Handles TP-initiated uncached memory accesses: + * l[], g[], stack, 2d surfaces, render targets. 
*/ + if (status & 0x100) { + nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, + "PGRAPH_TRAP_TPDMA"); + nv_wr32(dev, 0x400108, 0x100); + status &= ~0x100; + } + + if (status) { + if (display) + NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", + status); + nv_wr32(dev, 0x400108, status); + } +} + +/* There must be a *lot* of these. Will take some time to gather them up. */ +static struct nouveau_enum_names nv50_data_error_names[] = +{ + { 4, "INVALID_VALUE" }, + { 5, "INVALID_ENUM" }, + { 8, "INVALID_OBJECT" }, + { 0xc, "INVALID_BITFIELD" }, + { 0x28, "MP_NO_REG_SPACE" }, + { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, +}; + +static void nv50_pgraph_irq_handler(struct drm_device *dev) { + struct nouveau_pgraph_trap trap; + int unhandled = 0; uint32_t status; while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { - uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); - + /* NOTIFY: You've set a NOTIFY on a command and it's done. */ if (status & 0x00000001) { - nouveau_pgraph_intr_notify(dev, nsource); + nouveau_graph_trap_info(dev, &trap); + if (nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, + "PGRAPH_NOTIFY", &trap); status &= ~0x00000001; nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); } - if (status & 0x00000010) { - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); + /* COMPUTE_QUERY: Purpose and exact cause unknown, happens + * when you write 0x200 to 0x50c0 method 0x31c. */ + if (status & 0x00000002) { + nouveau_graph_trap_info(dev, &trap); + if (nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, + "PGRAPH_COMPUTE_QUERY", &trap); + status &= ~0x00000002; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); + } + + /* Unknown, never seen: 0x4 */ + /* ILLEGAL_MTHD: You used a wrong method for this class. */ + if (status & 0x00000010) { + nouveau_graph_trap_info(dev, &trap); + if (nouveau_pgraph_intr_swmthd(dev, &trap)) + unhandled = 1; + if (unhandled && nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, + "PGRAPH_ILLEGAL_MTHD", &trap); status &= ~0x00000010; nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); } + /* ILLEGAL_CLASS: You used a wrong class. */ + if (status & 0x00000020) { + nouveau_graph_trap_info(dev, &trap); + if (nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, + "PGRAPH_ILLEGAL_CLASS", &trap); + status &= ~0x00000020; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); + } + + /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. 
*/ + if (status & 0x00000040) { + nouveau_graph_trap_info(dev, &trap); + if (nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, + "PGRAPH_DOUBLE_NOTIFY", &trap); + status &= ~0x00000040; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); + } + + /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ if (status & 0x00001000) { nv_wr32(dev, 0x400500, 0x00000000); nv_wr32(dev, NV03_PGRAPH_INTR, @@ -613,49 +1116,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev) status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; } - if (status & 0x00100000) { - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_DATA_ERROR); + /* BUFFER_NOTIFY: Your m2mf transfer finished */ + if (status & 0x00010000) { + nouveau_graph_trap_info(dev, &trap); + if (nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, + "PGRAPH_BUFFER_NOTIFY", &trap); + status &= ~0x00010000; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); + } + /* DATA_ERROR: Invalid value for this method, or invalid + * state in current PGRAPH context for this operation */ + if (status & 0x00100000) { + nouveau_graph_trap_info(dev, &trap); + if (nouveau_ratelimit()) { + nouveau_graph_dump_trap_info(dev, + "PGRAPH_DATA_ERROR", &trap); + NV_INFO (dev, "PGRAPH_DATA_ERROR - "); + nouveau_print_enum_names(nv_rd32(dev, 0x400110), + nv50_data_error_names); + printk("\n"); + } status &= ~0x00100000; nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); } + /* TRAP: Something bad happened in the middle of command + * execution. Has a billion types, subtypes, and even + * subsubtypes. */ if (status & 0x00200000) { - int r; - - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); - - NV_ERROR(dev, "magic set 1:\n"); - for (r = 0x408900; r <= 0x408910; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, - nv_rd32(dev, r)); - nv_wr32(dev, 0x408900, - nv_rd32(dev, 0x408904) | 0xc0000000); - for (r = 0x408e08; r <= 0x408e24; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, - nv_rd32(dev, r)); - nv_wr32(dev, 0x408e08, - nv_rd32(dev, 0x408e08) | 0xc0000000); - - NV_ERROR(dev, "magic set 2:\n"); - for (r = 0x409900; r <= 0x409910; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, - nv_rd32(dev, r)); - nv_wr32(dev, 0x409900, - nv_rd32(dev, 0x409904) | 0xc0000000); - for (r = 0x409e08; r <= 0x409e24; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, - nv_rd32(dev, r)); - nv_wr32(dev, 0x409e08, - nv_rd32(dev, 0x409e08) | 0xc0000000); - + nv50_pgraph_trap_handler(dev); status &= ~0x00200000; - nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); } + /* Unknown, never seen: 0x00400000 */ + + /* SINGLE_STEP: Happens on every method if you turned on + * single stepping in 40008c */ + if (status & 0x01000000) { + nouveau_graph_trap_info(dev, &trap); + if (nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, + "PGRAPH_SINGLE_STEP", &trap); + status &= ~0x01000000; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); + } + + /* 0x02000000 happens when you pause a ctxprog... + * but the only way this can happen that I know is by + * poking the relevant MMIO register, and we don't + * do that. 
*/ + if (status) { NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); @@ -672,7 +1185,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev) } nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); - nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); + if (nv_rd32(dev, 0x400824) & (1 << 31)) + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); } static void @@ -690,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t status, fbdev_flags = 0; + uint32_t status; unsigned long flags; status = nv_rd32(dev, NV03_PMC_INTR_0); @@ -699,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - if (dev_priv->fbdev_info) { - fbdev_flags = dev_priv->fbdev_info->flags; - dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; - } - if (status & NV_PMC_INTR_0_PFIFO_PENDING) { nouveau_fifo_irq_handler(dev); status &= ~NV_PMC_INTR_0_PFIFO_PENDING; @@ -733,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) if (status) NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); - if (dev_priv->fbdev_info) - dev_priv->fbdev_info->flags = fbdev_flags; - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return IRQ_HANDLED; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2dc09dbd817d..c1fd42b0dad1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -347,6 +347,20 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, return -EBUSY; } + nv_wr32(dev, 0x100c80, 0x00040001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + nv_wr32(dev, 0x100c80, 0x00060001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + return 0; } @@ -387,6 +401,20 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return; + } + + nv_wr32(dev, 0x100c80, 0x00040001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return; + } + + nv_wr32(dev, 0x100c80, 0x00060001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); } } @@ -449,9 +477,30 @@ void nouveau_mem_close(struct drm_device *dev) } } -/*XXX won't work on BSD because of pci_read_config_dword */ static uint32_t -nouveau_mem_fb_amount_igp(struct drm_device *dev) +nouveau_mem_detect_nv04(struct drm_device *dev) +{ + uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0); + + if (boot0 & 0x00000100) + return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; + + switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { + case NV04_BOOT_0_RAM_AMOUNT_32MB: + return 32 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_16MB: + return 16 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_8MB: + return 8 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_4MB: + return 4 * 1024 * 1024; + } + + return 0; +} + 
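For reference, the BOOT_0 decode added above in nouveau_mem_detect_nv04() boils down to the standalone C sketch below. The 0..3 selector-to-size mapping in the switch and the example register value are illustrative assumptions for this note, not values quoted from the patch or read from hardware (the real field encodings live in nouveau_reg.h).

/* Standalone sketch of the nouveau_mem_detect_nv04() VRAM-size decode. */
#include <stdint.h>
#include <stdio.h>

static uint32_t nv04_vram_from_boot0(uint32_t boot0)
{
	/* Bit 8 set: bits 15:12 hold N, and the size is (N * 2 + 2) MiB. */
	if (boot0 & 0x00000100)
		return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;

	/* Otherwise a 2-bit RAM_AMOUNT field selects a fixed size; the
	 * 0/1/2/3 -> 32/4/8/16 MiB mapping here is assumed for
	 * illustration only. */
	switch (boot0 & 0x00000003) {
	case 1:  return  4 * 1024 * 1024;
	case 2:  return  8 * 1024 * 1024;
	case 3:  return 16 * 1024 * 1024;
	default: return 32 * 1024 * 1024;
	}
}

int main(void)
{
	/* Example: boot0 = 0x00002100 has bit 8 set and N = 2, so the
	 * decode yields (2 * 2 + 2) = 6 MiB. */
	printf("%u MiB\n", nv04_vram_from_boot0(0x00002100) >> 20);
	return 0;
}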
+static uint32_t +nouveau_mem_detect_nforce(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct pci_dev *bridge; @@ -463,11 +512,11 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) return 0; } - if (dev_priv->flags&NV_NFORCE) { + if (dev_priv->flags & NV_NFORCE) { pci_read_config_dword(bridge, 0x7C, &mem); return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; } else - if (dev_priv->flags&NV_NFORCE2) { + if (dev_priv->flags & NV_NFORCE2) { pci_read_config_dword(bridge, 0x84, &mem); return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; } @@ -477,50 +526,33 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) } /* returns the amount of FB ram in bytes */ -uint64_t nouveau_mem_fb_amount(struct drm_device *dev) +int +nouveau_mem_detect(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t boot0; - - switch (dev_priv->card_type) { - case NV_04: - boot0 = nv_rd32(dev, NV03_BOOT_0); - if (boot0 & 0x00000100) - return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; - - switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { - case NV04_BOOT_0_RAM_AMOUNT_32MB: - return 32 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_16MB: - return 16 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_8MB: - return 8 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_4MB: - return 4 * 1024 * 1024; - } - break; - case NV_10: - case NV_20: - case NV_30: - case NV_40: - case NV_50: - default: - if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { - return nouveau_mem_fb_amount_igp(dev); - } else { - uint64_t mem; - mem = (nv_rd32(dev, NV04_FIFO_DATA) & - NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> - NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; - return mem * 1024 * 1024; - } - break; + + if (dev_priv->card_type == NV_04) { + dev_priv->vram_size = nouveau_mem_detect_nv04(dev); + } else + if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { + dev_priv->vram_size = nouveau_mem_detect_nforce(dev); + } else { + dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); + dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; + if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) + dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); + dev_priv->vram_sys_base <<= 12; } - NV_ERROR(dev, - "Unable to detect video ram size. 
Please report your setup to " - DRIVER_EMAIL "\n"); - return 0; + NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); + if (dev_priv->vram_sys_base) { + NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", + dev_priv->vram_sys_base); + } + + if (dev_priv->vram_size) + return 0; + return -ENOMEM; } #if __OS_HAS_AGP @@ -631,15 +663,12 @@ nouveau_mem_init(struct drm_device *dev) spin_lock_init(&dev_priv->ttm.bo_list_lock); spin_lock_init(&dev_priv->tile.lock); - dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); - + dev_priv->fb_available_size = dev_priv->vram_size; dev_priv->fb_mappable_pages = dev_priv->fb_available_size; if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); dev_priv->fb_mappable_pages >>= PAGE_SHIFT; - NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20)); - /* remove reserved space at end of vram from available amount */ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_aper_free = dev_priv->fb_available_size; diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index aa9b310e41be..6ca80a3fe70d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -826,6 +826,7 @@ #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 #define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) #define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) #define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) #define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index ed1590577b6c..1d6ee8b55154 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -1,6 +1,7 @@ #include "drmP.h" #include "nouveau_drv.h" #include <linux/pagemap.h> +#include <linux/slab.h> #define NV_CTXDMA_PAGE_SHIFT 12 #define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) @@ -171,6 +172,24 @@ nouveau_sgdma_unbind(struct ttm_backend *be) } dev_priv->engine.instmem.finish_access(nvbe->dev); + if (dev_priv->card_type == NV_50) { + nv_wr32(dev, 0x100c80, 0x00050001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", + nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + nv_wr32(dev, 0x100c80, 0x00000001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", + nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + } + nvbe->bound = false; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index eb8f084d5f53..b02a231d6937 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -24,6 +24,7 @@ */ #include <linux/swab.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "drm_sarea.h" @@ -33,9 +34,9 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" +#include "nouveau_fbcon.h" #include "nv50_display.h" -static int nouveau_stub_init(struct drm_device *dev) { return 0; } static void nouveau_stub_takedown(struct drm_device *dev) {} static int nouveau_init_engine_ptrs(struct drm_device *dev) @@ -277,8 +278,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) 
engine->timer.init = nv04_timer_init; engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; - engine->fb.init = nouveau_stub_init; - engine->fb.takedown = nouveau_stub_takedown; + engine->fb.init = nv50_fb_init; + engine->fb.takedown = nv50_fb_takedown; engine->graph.grclass = nv50_graph_grclass; engine->graph.init = nv50_graph_init; engine->graph.takedown = nv50_graph_takedown; @@ -341,7 +342,7 @@ nouveau_card_init_channel(struct drm_device *dev) gpuobj = NULL; ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, - 0, nouveau_mem_fb_amount(dev), + 0, dev_priv->vram_size, NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, &gpuobj); if (ret) @@ -375,12 +376,15 @@ out_err: static void nouveau_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { + struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); nouveau_pci_resume(pdev); + drm_kms_helper_poll_enable(dev); } else { printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); + drm_kms_helper_poll_disable(dev); nouveau_pci_suspend(pdev, pmm); } } @@ -427,6 +431,10 @@ nouveau_card_init(struct drm_device *dev) goto out; } + ret = nouveau_mem_detect(dev); + if (ret) + goto out_bios; + ret = nouveau_gpuobj_early_init(dev); if (ret) goto out_bios; @@ -502,7 +510,7 @@ nouveau_card_init(struct drm_device *dev) else ret = nv04_display_create(dev); if (ret) - goto out_irq; + goto out_channel; } ret = nouveau_backlight_init(dev); @@ -511,11 +519,18 @@ nouveau_card_init(struct drm_device *dev) dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_helper_initial_config(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + nouveau_fbcon_init(dev); + drm_kms_helper_poll_init(dev); + } return 0; +out_channel: + if (dev_priv->channel) { + nouveau_channel_free(dev_priv->channel); + dev_priv->channel = NULL; + } out_irq: drm_irq_uninstall(dev); out_fifo: @@ -533,6 +548,7 @@ out_mc: out_gpuobj: nouveau_gpuobj_takedown(dev); out_mem: + nouveau_sgdma_takedown(dev); nouveau_mem_close(dev); out_instmem: engine->instmem.takedown(dev); @@ -553,6 +569,7 @@ static void nouveau_card_takedown(struct drm_device *dev) NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { + nouveau_backlight_exit(dev); if (dev_priv->channel) { @@ -627,6 +644,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev) #endif } +static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + struct apertures_struct *aper = alloc_apertures(3); + if (!aper) + return NULL; + + aper->ranges[0].base = pci_resource_start(pdev, 1); + aper->ranges[0].size = pci_resource_len(pdev, 1); + aper->count = 1; + + if (pci_resource_len(pdev, 2)) { + aper->ranges[aper->count].base = pci_resource_start(pdev, 2); + aper->ranges[aper->count].size = pci_resource_len(pdev, 2); + aper->count++; + } + + if (pci_resource_len(pdev, 3)) { + aper->ranges[aper->count].base = pci_resource_start(pdev, 3); + aper->ranges[aper->count].size = pci_resource_len(pdev, 3); + aper->count++; + } + + return aper; +} + +static int nouveau_remove_conflicting_drivers(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + bool primary = false; + dev_priv->apertures = nouveau_get_apertures(dev); + if (!dev_priv->apertures) + 
return -ENOMEM; + +#ifdef CONFIG_X86 + primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +#endif + + remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary); + return 0; +} + int nouveau_load(struct drm_device *dev, unsigned long flags) { struct drm_nouveau_private *dev_priv; @@ -714,29 +773,30 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", dev_priv->card_type, reg0); - /* map larger RAMIN aperture on NV40 cards */ - dev_priv->ramin = NULL; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + int ret = nouveau_remove_conflicting_drivers(dev); + if (ret) + return ret; + } + + /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */ if (dev_priv->card_type >= NV_40) { int ramin_bar = 2; if (pci_resource_len(dev->pdev, ramin_bar) == 0) ramin_bar = 3; dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); - dev_priv->ramin = ioremap( - pci_resource_start(dev->pdev, ramin_bar), + dev_priv->ramin = + ioremap(pci_resource_start(dev->pdev, ramin_bar), dev_priv->ramin_size); if (!dev_priv->ramin) { - NV_ERROR(dev, "Failed to init RAMIN mapping, " - "limited instance memory available\n"); + NV_ERROR(dev, "Failed to map PRAMIN BAR.\n"); + return -ENOMEM; } - } - - /* On older cards (or if the above failed), create a map covering - * the BAR0 PRAMIN aperture */ - if (!dev_priv->ramin) { + } else { dev_priv->ramin_size = 1 * 1024 * 1024; dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, - dev_priv->ramin_size); + dev_priv->ramin_size); if (!dev_priv->ramin) { NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); return -ENOMEM; @@ -784,6 +844,8 @@ int nouveau_unload(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; if (drm_core_check_feature(dev, DRIVER_MODESET)) { + drm_kms_helper_poll_fini(dev); + nouveau_fbcon_fini(dev); if (dev_priv->card_type >= NV_50) nv50_display_destroy(dev); else @@ -849,6 +911,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, case NOUVEAU_GETPARAM_VM_VRAM_BASE: getparam->value = dev_priv->vm_vram_base; break; + case NOUVEAU_GETPARAM_PTIMER_TIME: + getparam->value = dev_priv->engine.timer.read(dev); + break; case NOUVEAU_GETPARAM_GRAPH_UNITS: /* NV40 and NV50 versions are quite different, but register * address is the same. 
User is supposed to know the card diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index a1d1ebb073d9..eba687f1099e 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) struct drm_framebuffer *fb = crtc->fb; /* Calculate our timings */ - int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; - int horizStart = (mode->crtc_hsync_start >> 3) - 1; - int horizEnd = (mode->crtc_hsync_end >> 3) - 1; + int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; + int horizStart = (mode->crtc_hsync_start >> 3) + 1; + int horizEnd = (mode->crtc_hsync_end >> 3) + 1; int horizTotal = (mode->crtc_htotal >> 3) - 5; int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c index 89a91b9d8b25..aaf3de3bc816 100644 --- a/drivers/gpu/drm/nouveau/nv04_cursor.c +++ b/drivers/gpu/drm/nouveau/nv04_cursor.c @@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) static void nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) { + nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, NV_PRAMDAC_CU_START_POS, XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 3da90c2c4e63..1eeac4fae73d 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -30,8 +30,8 @@ void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; @@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; @@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; uint32_t fg; @@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) return; } - width = ALIGN(image->width, 32); - dsize = (width * image->height) >> 5; + width = ALIGN(image->width, 8); + dsize = ALIGN(width * image->height, 32) >> 5; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->height << 16) | width); + 
OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); while (dsize) { @@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) int nv04_fbcon_accel_init(struct fb_info *info) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; const int sub = NvSubCtxSurf2D; @@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info) if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? + ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? 0x009f : 0x005f, NvImageBlit); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index e260986ea65a..618355e9cdd5 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, return 0; } -static int -nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +/* + * Software methods, why they are needed, and how they all work: + * + * NV04 and NV05 keep most of the state in PGRAPH context itself, but some + * 2d engine settings are kept inside the grobjs themselves. The grobjs are + * 3 words long on both. grobj format on NV04 is: + * + * word 0: + * - bits 0-7: class + * - bit 12: color key active + * - bit 13: clip rect active + * - bit 14: if set, destination surface is swizzled and taken from buffer 5 + * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken + * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or + * NV03_CONTEXT_SURFACE_DST]. + * - bits 15-17: 2d operation [aka patch config] + * - bit 24: patch valid [enables rendering using this object] + * - bit 25: surf3d valid [for tex_tri and multitex_tri only] + * word 1: + * - bits 0-1: mono format + * - bits 8-13: color format + * - bits 16-31: DMA_NOTIFY instance + * word 2: + * - bits 0-15: DMA_A instance + * - bits 16-31: DMA_B instance + * + * On NV05 it's: + * + * word 0: + * - bits 0-7: class + * - bit 12: color key active + * - bit 13: clip rect active + * - bit 14: if set, destination surface is swizzled and taken from buffer 5 + * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken + * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or + * NV03_CONTEXT_SURFACE_DST]. + * - bits 15-17: 2d operation [aka patch config] + * - bits 20-22: dither mode + * - bit 24: patch valid [enables rendering using this object] + * - bit 25: surface_dst/surface_color/surf2d/surf3d valid + * - bit 26: surface_src/surface_zeta valid + * - bit 27: pattern valid + * - bit 28: rop valid + * - bit 29: beta1 valid + * - bit 30: beta4 valid + * word 1: + * - bits 0-1: mono format + * - bits 8-13: color format + * - bits 16-31: DMA_NOTIFY instance + * word 2: + * - bits 0-15: DMA_A instance + * - bits 16-31: DMA_B instance + * + * NV05 will set/unset the relevant valid bits when you poke the relevant + * object-binding methods with object of the proper type, or with the NULL + * type. It'll only allow rendering using the grobj if all needed objects + * are bound. The needed set of objects depends on selected operation: for + * example rop object is needed by ROP_AND, but not by SRCCOPY_AND. 
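+ *
+ * A worked example (editor's illustration, values invented): an NV05 grobj
+ * for the nv04 imageblit class (0x5f), set up for the ROP_AND operation
+ * with surf2d, pattern and rop all bound, would have word 0 = 0x1b00805f:
+ *   bits 0-7:   0x5f  [class]
+ *   bits 15-17: 1     [2d operation: ROP_AND]
+ *   bit 24:     1     [patch valid]
+ *   bit 25:     1     [surf2d valid]
+ *   bits 27-28: 1,1   [pattern and rop valid]
+ * These are exactly the bits nv04_graph_set_ctx_val() below tests before
+ * deciding whether to turn bit 24 on.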
+ * + * NV04 doesn't have these methods implemented at all, and doesn't have the + * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24 + * is set. So we have to emulate them in software, internally keeping the + * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04, + * but the last word isn't actually used for anything, we abuse it for this + * purpose. + * + * Actually, NV05 can optionally check bit 24 too, but we disable this since + * there's no use for it. + * + * For unknown reasons, NV04 implements surf3d binding in hardware as an + * exception. Also for unknown reasons, NV04 doesn't implement the clipping + * methods on the surf3d object, so we have to emulate them too. + */ + +static void +nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) { struct drm_device *dev = chan->dev; uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; @@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, uint32_t tmp; tmp = nv_ri32(dev, instance); - tmp &= ~0x00038000; - tmp |= ((data & 7) << 15); + tmp &= ~mask; + tmp |= value; nv_wi32(dev, instance, tmp); nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp); +} + +static void +nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) +{ + struct drm_device *dev = chan->dev; + uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; + uint32_t tmp, ctx1; + int class, op, valid = 1; + + ctx1 = nv_ri32(dev, instance); + class = ctx1 & 0xff; + op = (ctx1 >> 15) & 7; + tmp = nv_ri32(dev, instance + 0xc); + tmp &= ~mask; + tmp |= value; + nv_wi32(dev, instance + 0xc, tmp); + + /* check for valid surf2d/surf_dst/surf_color */ + if (!(tmp & 0x02000000)) + valid = 0; + /* check for valid surf_src/surf_zeta */ + if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000)) + valid = 0; + + switch (op) { + /* SRCCOPY_AND, SRCCOPY: no extra objects required */ + case 0: + case 3: + break; + /* ROP_AND: requires pattern and rop */ + case 1: + if (!(tmp & 0x18000000)) + valid = 0; + break; + /* BLEND_AND: requires beta1 */ + case 2: + if (!(tmp & 0x20000000)) + valid = 0; + break; + /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */ + case 4: + case 5: + if (!(tmp & 0x40000000)) + valid = 0; + break; + } + + nv04_graph_set_ctx1(chan, 0x01000000, valid << 24); +} + +static int +nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + if (data > 5) + return 1; + /* Old versions of the objects only accept first three operations. */ + if (data > 2 && grclass < 0x40) + return 1; + nv04_graph_set_ctx1(chan, 0x00038000, data << 15); + /* changing operation changes set of objects needed for validation */ + nv04_graph_set_ctx_val(chan, 0, 0); + return 0; +} + +static int +nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + uint32_t min = data & 0xffff, max; + uint32_t w = data >> 16; + if (min & 0x8000) + /* too large */ + return 1; + if (w & 0x8000) + /* yes, it accepts negative for some reason. 
*/ + w |= 0xffff0000; + max = min + w; + max &= 0x3ffff; + nv_wr32(chan->dev, 0x40053c, min); + nv_wr32(chan->dev, 0x400544, max); + return 0; +} + +static int +nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + uint32_t min = data & 0xffff, max; + uint32_t w = data >> 16; + if (min & 0x8000) + /* too large */ + return 1; + if (w & 0x8000) + /* yes, it accepts negative for some reason. */ + w |= 0xffff0000; + max = min + w; + max &= 0x3ffff; + nv_wr32(chan->dev, 0x400540, min); + nv_wr32(chan->dev, 0x400548, max); return 0; } +static int +nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx1(chan, 0x00004000, 0); + nv04_graph_set_ctx_val(chan, 0x02000000, 0); + return 0; + case 0x42: + nv04_graph_set_ctx1(chan, 0x00004000, 0); + nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx1(chan, 0x00004000, 0); + nv04_graph_set_ctx_val(chan, 0x02000000, 0); + return 0; + case 0x42: + nv04_graph_set_ctx1(chan, 0x00004000, 0); + nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); + return 0; + case 0x52: + nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000); + nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x08000000, 0); + return 0; + case 0x18: + nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x08000000, 0); + return 0; + case 0x44: + nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x10000000, 0); + return 0; + case 0x43: + nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x20000000, 0); + return 0; + case 0x12: + nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x40000000, 0); + return 0; + case 0x72: + nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x02000000, 0); + return 0; + case 0x58: + nv04_graph_set_ctx_val(chan, 
0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x04000000, 0); + return 0; + case 0x59: + nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x02000000, 0); + return 0; + case 0x5a: + nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx_val(chan, 0x04000000, 0); + return 0; + case 0x5b: + nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx1(chan, 0x2000, 0); + return 0; + case 0x19: + nv04_graph_set_ctx1(chan, 0x2000, 0x2000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: + nv04_graph_set_ctx1(chan, 0x1000, 0); + return 0; + /* Yes, for some reason even the old versions of objects + * accept 0x57 and not 0x17. Consistency be damned. + */ + case 0x57: + nv04_graph_set_ctx1(chan, 0x1000, 0x1000); + return 0; + } + return 1; +} + static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { { 0x0150, nv04_graph_mthd_set_ref }, {} }; -static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = { +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = { + { 0x0184, nv04_graph_mthd_bind_nv01_patt }, + { 0x0188, nv04_graph_mthd_bind_rop }, + { 0x018c, nv04_graph_mthd_bind_beta1 }, + { 0x0190, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = { + { 0x0188, nv04_graph_mthd_bind_nv04_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = { + { 0x0184, nv04_graph_mthd_bind_chroma }, + { 0x0188, nv04_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_nv01_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x019c, nv04_graph_mthd_bind_surf_src }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = { + { 0x0184, nv04_graph_mthd_bind_chroma }, + { 0x0188, nv04_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_nv04_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_beta4 }, + { 0x019c, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation 
}, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = { + { 0x0188, nv04_graph_mthd_bind_chroma }, + { 0x018c, nv04_graph_mthd_bind_clip }, + { 0x0190, nv04_graph_mthd_bind_nv04_patt }, + { 0x0194, nv04_graph_mthd_bind_rop }, + { 0x0198, nv04_graph_mthd_bind_beta1 }, + { 0x019c, nv04_graph_mthd_bind_beta4 }, + { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, + { 0x03e4, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = { + { 0x0184, nv04_graph_mthd_bind_chroma }, + { 0x0188, nv04_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_nv01_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = { + { 0x0184, nv04_graph_mthd_bind_chroma }, + { 0x0188, nv04_graph_mthd_bind_nv01_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, { 0x02fc, nv04_graph_mthd_set_operation }, {}, }; +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = { + { 0x0184, nv04_graph_mthd_bind_chroma }, + { 0x0188, nv04_graph_mthd_bind_nv04_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = { + { 0x0188, nv04_graph_mthd_bind_nv01_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x0304, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = { + { 0x0188, nv04_graph_mthd_bind_nv04_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf }, + { 0x0304, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { + { 0x0184, nv04_graph_mthd_bind_clip }, + { 0x0188, nv04_graph_mthd_bind_nv01_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { + { 0x0184, nv04_graph_mthd_bind_clip }, + { 0x0188, nv04_graph_mthd_bind_nv04_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { + { 0x0188, nv04_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_surf_color }, + { 0x0190, nv04_graph_mthd_bind_surf_zeta }, + {}, +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { + { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, + { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, + {}, +}; + struct nouveau_pgraph_object_class nv04_graph_grclass[] = { - { 0x0039, false, NULL }, - { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ - { 0x005f, false, 
nv04_graph_mthds_set_operation }, /* imageblit */ - { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ - { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */ + { 0x0038, false, NULL }, /* dvd subpicture */ + { 0x0039, false, NULL }, /* m2mf */ + { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ + { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ + { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ + { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ + { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ + { 0x0064, false, NULL }, /* nv05 iifc */ + { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ + { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ + { 0x0065, false, NULL }, /* nv05 ifc */ + { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ + { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ + { 0x0066, false, NULL }, /* nv05 sifc */ + { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ + { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ { 0x0030, false, NULL }, /* null */ { 0x0042, false, NULL }, /* surf2d */ { 0x0043, false, NULL }, /* rop */ { 0x0012, false, NULL }, /* beta1 */ { 0x0072, false, NULL }, /* beta4 */ { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ + { 0x0018, false, NULL }, /* nv01 pattern */ + { 0x0044, false, NULL }, /* nv04 pattern */ { 0x0052, false, NULL }, /* swzsurf */ - { 0x0053, false, NULL }, /* surf3d */ + { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */ + { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ { 0x0054, false, NULL }, /* tex_tri */ { 0x0055, false, NULL }, /* multitex_tri */ + { 0x0017, false, NULL }, /* nv01 chroma */ + { 0x0057, false, NULL }, /* nv04 chroma */ + { 0x0058, false, NULL }, /* surf_dst */ + { 0x0059, false, NULL }, /* surf_src */ + { 0x005a, false, NULL }, /* surf_color */ + { 0x005b, false, NULL }, /* surf_zeta */ + { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */ + { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */ + { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ + { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ + { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ + { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ { 0x506e, true, nv04_graph_mthds_sw }, {} }; diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 6b2ef4a9fce1..500ccfd3a0b8 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -278,7 +278,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) default: nv_wr32(dev, 0x2230, 0); nv_wr32(dev, NV40_PFIFO_RAMFC, - ((nouveau_mem_fb_amount(dev) - 512 * 1024 + + ((dev_priv->vram_size - 512 * 1024 + dev_priv->ramfc_offset) >> 16) | (3 << 16)); break; } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 53e8afe1dcd1..704a25d04ac9 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev) if (!dev_priv->engine.graph.ctxprog) { struct nouveau_grctx ctx = {}; - uint32_t cp[256]; + uint32_t *cp; + + cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); + if (!cp) + return -ENOMEM; ctx.dev = dev; ctx.mode = NOUVEAU_GRCTX_PROG; @@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev) nv_wr32(dev, 
NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); for (i = 0; i < ctx.ctxprog_len; i++) nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); + + kfree(cp); } /* No context present currently */ @@ -335,6 +341,27 @@ nv40_graph_init(struct drm_device *dev) nv_wr32(dev, 0x400b38, 0x2ffff800); nv_wr32(dev, 0x400b3c, 0x00006000); + /* Tiling related stuff. */ + switch (dev_priv->chipset) { + case 0x44: + case 0x4a: + nv_wr32(dev, 0x400bc4, 0x1003d888); + nv_wr32(dev, 0x400bbc, 0xb7a7b500); + break; + case 0x46: + nv_wr32(dev, 0x400bc4, 0x0000e024); + nv_wr32(dev, 0x400bbc, 0xb7a7b520); + break; + case 0x4c: + case 0x4e: + case 0x67: + nv_wr32(dev, 0x400bc4, 0x1003d888); + nv_wr32(dev, 0x400bbc, 0xb7a7b540); + break; + default: + break; + } + /* Turn all the tiling regions off. */ for (i = 0; i < pfb->num_tiles; i++) nv40_graph_set_region_tiling(dev, i, 0, 0, 0); diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c index 11b11c31f543..9b5c97469588 100644 --- a/drivers/gpu/drm/nouveau/nv40_grctx.c +++ b/drivers/gpu/drm/nouveau/nv40_grctx.c @@ -115,11 +115,6 @@ /* TODO: * - get vs count from 0x1540 - * - document unimplemented bits compared to nvidia - * - nsource handling - * - R0 & 0x0200 handling - * - single-vs handling - * - 400314 bit 0 */ static int diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c new file mode 100644 index 000000000000..2cdc2bfe7179 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_calc.c @@ -0,0 +1,87 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "drm_fixed.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" + +int +nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk, + int *N1, int *M1, int *N2, int *M2, int *P) +{ + struct nouveau_pll_vals pll_vals; + int ret; + + ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals); + if (ret <= 0) + return ret; + + *N1 = pll_vals.N1; + *M1 = pll_vals.M1; + *N2 = pll_vals.N2; + *M2 = pll_vals.M2; + *P = pll_vals.log2P; + return ret; +} + +int +nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, + int *N, int *fN, int *M, int *P) +{ + fixed20_12 fb_div, a, b; + + *P = pll->vco1.maxfreq / clk; + if (*P > pll->max_p) + *P = pll->max_p; + if (*P < pll->min_p) + *P = pll->min_p; + + /* *M = ceil(refclk / pll->vco.max_inputfreq); */ + a.full = dfixed_const(pll->refclk); + b.full = dfixed_const(pll->vco1.max_inputfreq); + a.full = dfixed_div(a, b); + a.full = dfixed_ceil(a); + *M = dfixed_trunc(a); + + /* fb_div = (vco * *M) / refclk; */ + fb_div.full = dfixed_const(clk * *P); + fb_div.full = dfixed_mul(fb_div, a); + a.full = dfixed_const(pll->refclk); + fb_div.full = dfixed_div(fb_div, a); + + /* *N = floor(fb_div); */ + a.full = dfixed_floor(fb_div); + *N = dfixed_trunc(fb_div); + + /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */ + b.full = dfixed_const(8192); + a.full = dfixed_mul(a, b); + fb_div.full = dfixed_mul(fb_div, b); + fb_div.full = fb_div.full - a.full; + *fN = dfixed_trunc(fb_div) - 4096; + *fN &= 0xffff; + + return clk; +} diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index cfabeb974a56..b4e4a3b05eae 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update) int nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) { - uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); - struct nouveau_pll_vals pll; - struct pll_lims limits; + uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); + struct pll_lims pll; uint32_t reg1, reg2; - int ret; + int ret, N1, M1, N2, M2, P; - ret = get_pll_limits(dev, pll_reg, &limits); + ret = get_pll_limits(dev, reg, &pll); if (ret) return ret; - ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll); - if (ret <= 0) - return ret; + if (pll.vco2.maxfreq) { + ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P); + if (ret <= 0) + return 0; + + NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n", + pclk, ret, N1, M1, N2, M2, P); - if (limits.vco2.maxfreq) { - reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00; - reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00; - nv_wr32(dev, pll_reg, 0x10000611); - nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1); - nv_wr32(dev, pll_reg + 8, - reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2); + reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00; + reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00; + nv_wr32(dev, reg, 0x10000611); + nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1); + nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); } else { - reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000; - nv_wr32(dev, pll_reg, 0x50000610); - nv_wr32(dev, pll_reg + 4, reg1 | - (pll.log2P << 16) | (pll.M1 << 8) | pll.N1); + ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); + if (ret <= 0) + return 0; + + NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", + pclk, ret, N1, N2, M1, P); + + reg1 = nv_rd32(dev, reg + 4) & 0xffc00000; + nv_wr32(dev, 
reg, 0x50000610); + nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); + nv_wr32(dev, reg + 8, N2); } return 0; diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c index 753e723adb3a..03ad7ab14f09 100644 --- a/drivers/gpu/drm/nouveau/nv50_cursor.c +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c @@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) { struct drm_device *dev = nv_crtc->base.dev; + nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), ((y & 0xFFFF) << 16) | (x & 0xFFFF)); /* Needed to make the cursor move. */ diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 61a89f2dc553..580a5d10be93 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -29,6 +29,7 @@ #include "nouveau_encoder.h" #include "nouveau_connector.h" #include "nouveau_fb.h" +#include "nouveau_fbcon.h" #include "drm_crtc_helper.h" static void @@ -143,7 +144,7 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) } ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, - 0, nouveau_mem_fb_amount(dev)); + 0, dev_priv->vram_size); if (ret) { nv50_evo_channel_del(pchan); return ret; @@ -231,7 +232,7 @@ nv50_display_init(struct drm_device *dev) /* This used to be in crtc unblank, but seems out of place there. */ nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); /* RAM is clamped to 256 MiB. */ - ram_amount = nouveau_mem_fb_amount(dev); + ram_amount = dev_priv->vram_size; NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); if (ram_amount > 256*1024*1024) ram_amount = 256*1024*1024; @@ -522,15 +523,17 @@ int nv50_display_create(struct drm_device *dev) } for (i = 0 ; i < dcb->connector.entries; i++) { - if (i != 0 && dcb->connector.entry[i].index == - dcb->connector.entry[i - 1].index) + if (i != 0 && dcb->connector.entry[i].index2 == + dcb->connector.entry[i - 1].index2) continue; nouveau_connector_create(dev, &dcb->connector.entry[i]); } ret = nv50_display_init(dev); - if (ret) + if (ret) { + nv50_display_destroy(dev); return ret; + } return 0; } @@ -781,6 +784,37 @@ ack: } static void +nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) +{ + int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); + struct drm_encoder *encoder; + uint32_t tmp, unk0 = 0, unk1 = 0; + + if (dcb->type != OUTPUT_DP) + return; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (nv_encoder->dcb == dcb) { + unk0 = nv_encoder->dp.unk0; + unk1 = nv_encoder->dp.unk1; + break; + } + } + + if (unk0 || unk1) { + tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); + tmp &= 0xfffffe03; + nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0); + + tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); + tmp &= 0xfef080c0; + nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1); + } +} + +static void nv50_display_unk20_handler(struct drm_device *dev) { struct dcb_entry *dcbent; @@ -803,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev) nouveau_bios_run_display_table(dev, dcbent, script, pclk); + nv50_display_unk20_dp_hack(dev, dcbent); + tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); tmp &= ~0x000000f; nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); @@ -885,10 +921,12 @@ nv50_display_error_handler(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); } 
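[Editor's note: a quick standalone check of the fixed-point feedback-divider arithmetic introduced by the nv50_calc_pll2()/nv50_crtc_set_clock() hunks above. This is an illustrative userspace sketch, not driver code: refclk, clk, M and P are invented values, and the factor 8192 mirrors the dfixed_* sequence that derives *N and *fN.]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t refclk = 27000;  /* kHz, hypothetical reference clock */
	uint64_t clk = 135000;    /* kHz, hypothetical target clock */
	uint64_t M = 1, P = 2;    /* hypothetical divider choices */

	/* fb_div = (clk * P) * M / refclk, scaled by 8192 so the low
	 * 13 bits keep the fraction, as the dfixed_* helpers above do */
	uint64_t fb_8192 = clk * P * M * 8192 / refclk;
	uint32_t N = (uint32_t)(fb_8192 / 8192);        /* integer part */
	int32_t fN = (int32_t)(fb_8192 % 8192) - 4096;  /* signed fraction */

	printf("N=%u fN=0x%04x\n", N, (unsigned)(fN & 0xffff));
	return 0;
}

For these numbers fb_div is exactly 10.0, so N = 10 and fN prints as 0xf000 (-4096), matching the "- 4096" bias and the 0xffff mask in nv50_calc_pll2().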
-static void -nv50_display_irq_hotplug(struct drm_device *dev) +void +nv50_display_irq_hotplug_bh(struct work_struct *work) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = + container_of(work, struct drm_nouveau_private, hpd_work); + struct drm_device *dev = dev_priv->dev; struct drm_connector *connector; const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; uint32_t unplug_mask, plug_mask, change_mask; @@ -941,6 +979,8 @@ nv50_display_irq_hotplug(struct drm_device *dev) nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); if (dev_priv->chipset >= 0x90) nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); + + drm_helper_hpd_irq_event(dev); } void @@ -949,8 +989,10 @@ nv50_display_irq_handler(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t delayed = 0; - while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) - nv50_display_irq_hotplug(dev); + if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { + if (!work_pending(&dev_priv->hpd_work)) + queue_work(dev_priv->wq, &dev_priv->hpd_work); + } while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index 3ae8d0725f63..581d405ac014 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -37,6 +37,7 @@ void nv50_display_irq_handler(struct drm_device *dev); void nv50_display_irq_handler_bh(struct work_struct *work); +void nv50_display_irq_hotplug_bh(struct work_struct *work); int nv50_display_init(struct drm_device *dev); int nv50_display_create(struct drm_device *dev); int nv50_display_destroy(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c new file mode 100644 index 000000000000..32611bd30e6d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -0,0 +1,38 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +int +nv50_fb_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* Not a clue what this is exactly. Without pointing it at a + * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) + * cause IOMMU "read from address 0" errors (rh#561267) + */ + nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); + + /* This is needed to get meaningful information from 100c90 + * on traps. No idea what these values mean exactly. 
*/ + switch (dev_priv->chipset) { + case 0x50: + nv_wr32(dev, 0x100c90, 0x0707ff); + break; + case 0xa5: + case 0xa8: + nv_wr32(dev, 0x100c90, 0x0d0fff); + break; + default: + nv_wr32(dev, 0x100c90, 0x1d07ff); + break; + } + + return 0; +} + +void +nv50_fb_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 993c7126fbde..6bf025c6fc6f 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -6,8 +6,8 @@ void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; @@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; @@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; uint32_t width, dwords, *data = (uint32_t *)image->data; @@ -152,13 +152,16 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) int nv50_fbcon_accel_init(struct fb_info *info) { - struct nouveau_fbcon_par *par = info->par; - struct drm_device *dev = par->dev; + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; struct nouveau_gpuobj *eng2d = NULL; + uint64_t fb; int ret, format; + fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; + switch (info->var.bits_per_pixel) { case 8: format = 0xf3; @@ -233,7 +236,7 @@ nv50_fbcon_accel_init(struct fb_info *info) BEGIN_RING(chan, NvSub2D, 0x0808, 3); OUT_RING(chan, 0); OUT_RING(chan, 0); - OUT_RING(chan, 0); + OUT_RING(chan, 1); BEGIN_RING(chan, NvSub2D, 0x081c, 1); OUT_RING(chan, 1); BEGIN_RING(chan, NvSub2D, 0x0840, 4); @@ -248,9 +251,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, 0); - OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + - dev_priv->vm_vram_base); + OUT_RING(chan, upper_32_bits(fb)); + OUT_RING(chan, lower_32_bits(fb)); BEGIN_RING(chan, NvSub2D, 0x0230, 2); OUT_RING(chan, format); OUT_RING(chan, 1); @@ -258,9 +260,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, 0); - OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + - dev_priv->vm_vram_base); + OUT_RING(chan, upper_32_bits(fb)); + OUT_RING(chan, lower_32_bits(fb)); return 0; } 
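[Editor's note: the nv50_fbcon_accel_init() hunk above stops truncating the framebuffer offset to a single 32-bit ring word and pushes it as an upper/lower pair, since smem_start - fb_phys + vm_vram_base can exceed 4 GiB. A minimal sketch of that split; the two macros repeat the kernel's definitions only so the example builds standalone, and the address is an invented value:]

#include <stdio.h>
#include <stdint.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)(n))

int main(void)
{
	uint64_t fb = 0x120000000ULL; /* hypothetical VM address above 4 GiB */

	/* these two values correspond to the two OUT_RING() words above */
	printf("hi=0x%08x lo=0x%08x\n",
	       (unsigned)upper_32_bits(fb), (unsigned)lower_32_bits(fb));
	return 0;
}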
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/nouveau/nv50_gpio.c index 3d4d84e078ac..bb47ad737267 100644 --- a/drivers/gpu/drm/radeon/radeon_fixed.h +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -1,5 +1,5 @@ /* - * Copyright 2009 Red Hat Inc. + * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -19,49 +19,58 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Authors: Dave Airlie + * Authors: Ben Skeggs */ -#ifndef RADEON_FIXED_H -#define RADEON_FIXED_H -typedef union rfixed { - u32 full; -} fixed20_12; +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" - -#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ -#define rfixed_const_half(A) (u32)(((A) << 12) + 2048) -#define rfixed_const_666(A) (u32)(((A) << 12) + 2731) -#define rfixed_const_8(A) (u32)(((A) << 12) + 3277) -#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) -#define fixed_init(A) { .full = rfixed_const((A)) } -#define fixed_init_half(A) { .full = rfixed_const_half((A)) } -#define rfixed_trunc(A) ((A).full >> 12) - -static inline u32 rfixed_floor(fixed20_12 A) +static int +nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) { - u32 non_frac = rfixed_trunc(A); + const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; - return rfixed_const(non_frac); + if (gpio->line >= 32) + return -EINVAL; + + *reg = nv50_gpio_reg[gpio->line >> 3]; + *shift = (gpio->line & 7) << 2; + return 0; } -static inline u32 rfixed_ceil(fixed20_12 A) +int +nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) { - u32 non_frac = rfixed_trunc(A); + struct dcb_gpio_entry *gpio; + uint32_t r, s, v; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) + return -ENOENT; - if (A.full > rfixed_const(non_frac)) - return rfixed_const(non_frac + 1); - else - return rfixed_const(non_frac); + if (nv50_gpio_location(gpio, &r, &s)) + return -EINVAL; + + v = nv_rd32(dev, r) >> (s + 2); + return ((v & 1) == (gpio->state[1] & 1)); } -static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) +int +nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) { - u64 tmp = ((u64)A.full << 13); + struct dcb_gpio_entry *gpio; + uint32_t r, s, v; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) + return -ENOENT; + + if (nv50_gpio_location(gpio, &r, &s)) + return -EINVAL; - do_div(tmp, B.full); - tmp += 1; - tmp /= 2; - return lower_32_bits(tmp); + v = nv_rd32(dev, r) & ~(0x3 << s); + v |= (gpio->state[state] ^ 2) << s; + nv_wr32(dev, r, v); + return 0; } -#endif diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 857a09671a39..b203d06f601f 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev) static void nv50_graph_init_regs__nv(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t units = nv_rd32(dev, 0x1540); + int i; + NV_DEBUG(dev, "\n"); nv_wr32(dev, 0x400804, 0xc0000000); @@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev) nv_wr32(dev, 0x405018, 0xc0000000); nv_wr32(dev, 0x402000, 0xc0000000); + for (i = 0; i < 16; i++) { + if (units & 1 << i) { + if (dev_priv->chipset < 0xa0) { + nv_wr32(dev, 0x408900 + (i << 12), 
0xc0000000); + nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000); + nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000); + } else { + nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000); + nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000); + nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000); + } + } + } + nv_wr32(dev, 0x400108, 0xffffffff); nv_wr32(dev, 0x400824, 0x00004000); @@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan) nouveau_grctx_vals_load(dev, ctx); } nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); - if ((dev_priv->chipset & 0xf0) == 0xa0) - nv_wo32(dev, ctx, 0x00004/4, 0x00000000); - else - nv_wo32(dev, ctx, 0x0011c/4, 0x00000000); dev_priv->engine.instmem.finish_access(dev); return 0; @@ -396,9 +410,10 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = { { 0x5039, false, NULL }, /* m2mf */ { 0x502d, false, NULL }, /* 2d */ { 0x50c0, false, NULL }, /* compute */ + { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ { 0x5097, false, NULL }, /* tesla (nv50) */ - { 0x8297, false, NULL }, /* tesla (nv80/nv90) */ - { 0x8397, false, NULL }, /* tesla (nva0) */ - { 0x8597, false, NULL }, /* tesla (nva8) */ + { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ + { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ + { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ {} }; diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index d105fcd42ca0..42a8fb20c1e6 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -55,15 +55,18 @@ #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 #define CP_FLAG_AUTO_LOAD_PENDING 1 +#define CP_FLAG_NEWCTX ((2 * 32) + 10) +#define CP_FLAG_NEWCTX_BUSY 0 +#define CP_FLAG_NEWCTX_DONE 1 #define CP_FLAG_XFER ((2 * 32) + 11) #define CP_FLAG_XFER_IDLE 0 #define CP_FLAG_XFER_BUSY 1 -#define CP_FLAG_NEWCTX ((2 * 32) + 12) -#define CP_FLAG_NEWCTX_BUSY 0 -#define CP_FLAG_NEWCTX_DONE 1 #define CP_FLAG_ALWAYS ((2 * 32) + 13) #define CP_FLAG_ALWAYS_FALSE 0 #define CP_FLAG_ALWAYS_TRUE 1 +#define CP_FLAG_INTR ((2 * 32) + 15) +#define CP_FLAG_INTR_NOT_PENDING 0 +#define CP_FLAG_INTR_PENDING 1 #define CP_CTX 0x00100000 #define CP_CTX_COUNT 0x000f0000 @@ -174,6 +177,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx) case 0x96: case 0x98: case 0xa0: + case 0xa3: case 0xa5: case 0xa8: case 0xaa: @@ -214,6 +218,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx) cp_name(ctx, cp_setup_save); cp_set (ctx, UNK1D, SET); cp_wait(ctx, STATUS, BUSY); + cp_wait(ctx, INTR, PENDING); + cp_bra (ctx, STATUS, BUSY, cp_setup_save); cp_set (ctx, UNK01, SET); cp_set (ctx, SWAP_DIRECTION, SAVE); @@ -269,7 +275,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) int offset, base; uint32_t units = nv_rd32 (ctx->dev, 0x1540); - /* 0800 */ + /* 0800: DISPATCH */ cp_ctx(ctx, 0x400808, 7); gr_def(ctx, 0x400814, 0x00000030); cp_ctx(ctx, 0x400834, 0x32); @@ -300,7 +306,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, 0x400b20, 0x0001629d); } - /* 0C00 */ + /* 0C00: VFETCH */ cp_ctx(ctx, 0x400c08, 0x2); gr_def(ctx, 0x400c08, 0x0000fe0c); @@ -326,7 +332,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) cp_ctx(ctx, 0x401540, 0x5); gr_def(ctx, 0x401550, 0x00001018); - /* 1800 */ + /* 1800: STREAMOUT */ cp_ctx(ctx, 0x401814, 0x1); gr_def(ctx, 0x401814, 0x000000ff); if (dev_priv->chipset == 0x50) { @@ -359,6 +365,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xac: gr_def(ctx, 0x401c00, 0x042500df); break; + case 0xa3: 
case 0xa5: case 0xa8: gr_def(ctx, 0x401c00, 0x142500df); @@ -413,6 +420,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) break; case 0x84: case 0xa0: + case 0xa3: case 0xa5: case 0xa8: case 0xaa: @@ -641,7 +649,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset == 0x50) cp_ctx(ctx, 0x4063e0, 0x1); - /* 6800 */ + /* 6800: M2MF */ if (dev_priv->chipset < 0x90) { cp_ctx(ctx, 0x406814, 0x2b); gr_def(ctx, 0x406818, 0x00000f80); @@ -787,6 +795,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa5: gr_def(ctx, offset + 0x1c, 0x310c0000); break; + case 0xa3: case 0xa8: case 0xaa: case 0xac: @@ -854,6 +863,8 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) else gr_def(ctx, offset + 0x8, 0x05010202); gr_def(ctx, offset + 0xc, 0x00030201); + if (dev_priv->chipset == 0xa3) + cp_ctx(ctx, base + 0x36c, 1); cp_ctx(ctx, base + 0x400, 2); gr_def(ctx, base + 0x404, 0x00000040); @@ -1154,7 +1165,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) nv50_graph_construct_gene_unk8(ctx); if (dev_priv->chipset == 0xa0) xf_emit(ctx, 0x189, 0); - else if (dev_priv->chipset < 0xa8) + else if (dev_priv->chipset == 0xa3) + xf_emit(ctx, 0xd5, 0); + else if (dev_priv->chipset == 0xa5) xf_emit(ctx, 0x99, 0); else if (dev_priv->chipset == 0xaa) xf_emit(ctx, 0x65, 0); @@ -1192,6 +1205,8 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) ctx->ctxvals_pos = offset + 4; if (dev_priv->chipset == 0xa0) xf_emit(ctx, 0xa80, 0); + else if (dev_priv->chipset == 0xa3) + xf_emit(ctx, 0xa7c, 0); else xf_emit(ctx, 0xa7a, 0); xf_emit(ctx, 1, 0x3fffff); @@ -1336,6 +1351,7 @@ nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx) xf_emit(ctx, 0x942, 0); break; case 0xa0: + case 0xa3: xf_emit(ctx, 0x2042, 0); break; case 0xa5: diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index de1f5b0062c5..5f21df31f3aa 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -63,9 +63,10 @@ nv50_instmem_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan; uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; + uint32_t save_nv001700; + uint64_t v; struct nv50_instmem_priv *priv; int ret, i; - uint32_t v, save_nv001700; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -76,17 +77,12 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0x1700; i <= 0x1710; i += 4) priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); - if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) - dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; - else - dev_priv->vram_sys_base = 0; - /* Reserve the last MiB of VRAM, we should probably try to avoid * setting up the below tables over the top of the VBIOS image at * some point. */ dev_priv->ramin_rsvd_vram = 1 << 20; - c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; + c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; c_size = 128 << 10; c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 
0x0 : 0x20; @@ -106,7 +102,7 @@ nv50_instmem_init(struct drm_device *dev) dev_priv->vm_gart_size = NV50_VM_BLOCK; dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; - dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); + dev_priv->vm_vram_size = dev_priv->vram_size; if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); @@ -189,8 +185,8 @@ nv50_instmem_init(struct drm_device *dev) i = 0; while (v < dev_priv->vram_sys_base + c_offset + c_size) { - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); - BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); v += 0x1000; i += 8; } diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index c2fff543b06f..812778db76ac 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -211,7 +211,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, mode_ctl = 0x0200; break; case OUTPUT_DP: - mode_ctl |= 0x00050000; + mode_ctl |= (nv_encoder->dp.mc_unknown << 16); if (nv_encoder->dcb->sorconf.link & 1) mode_ctl |= 0x00000800; else @@ -319,5 +319,28 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; + if (nv_encoder->dcb->type == OUTPUT_DP) { + int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); + uint32_t tmp; + + tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); + + switch ((tmp & 0x00000f00) >> 8) { + case 8: + case 9: + nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16; + tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); + nv_encoder->dp.unk0 = tmp & 0x000001fc; + tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); + nv_encoder->dp.unk1 = tmp & 0x010f7f3f; + break; + default: + break; + } + + if (!nv_encoder->dp.mc_unknown) + nv_encoder->dp.mc_unknown = 5; + } + return 0; } diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index 4c39a407aa4a..e671d0e74d4c 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -31,6 +31,7 @@ #include <linux/firmware.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index ed38262d9985..84b1f2729d43 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable $(call if_changed,mkregtable) +$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable + $(call if_changed,mkregtable) + $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h $(obj)/r200.o: $(obj)/r200_reg_safe.h @@ -47,10 +50,12 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h +$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h + radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ radeon_irq.o r300_cmdbuf.o r600_cp.o # add KMS driver -radeon-y += radeon_device.o radeon_kms.o \ +radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \ radeon_legacy_crtc.o radeon_legacy_encoders.o 
radeon_connectors.o \ @@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_kms.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ - evergreen.o + evergreen.o evergreen_cs.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d75788feac6c..1d569830ed99 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/sched.h> +#include <linux/slab.h> #include <asm/unaligned.h> #define ATOM_DEBUG @@ -52,15 +53,17 @@ typedef struct { struct atom_context *ctx; - uint32_t *ps, *ws; int ps_shift; uint16_t start; + unsigned last_jump; + unsigned long last_jump_jiffies; + bool abort; } atom_exec_context; int atom_debug = 0; -static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); -void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); +static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); +int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); static uint32_t atom_arg_mask[8] = { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, @@ -604,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) { int idx = U8((*ptr)++); + int r = 0; + if (idx < ATOM_TABLE_NAMES_CNT) SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); else SDEBUG(" table: %d\n", idx); if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) - atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); + r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); + if (r) { + ctx->abort = true; + } } static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) @@ -673,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) { int execute = 0, target = U16(*ptr); + unsigned long cjiffies; + (*ptr) += 2; switch (arg) { case ATOM_COND_ABOVE: @@ -700,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) if (arg != ATOM_COND_ALWAYS) SDEBUG(" taken: %s\n", execute ?
"yes" : "no"); SDEBUG(" target: 0x%04X\n", target); - if (execute) + if (execute) { + if (ctx->last_jump == (ctx->start + target)) { + cjiffies = jiffies; + if (time_after(cjiffies, ctx->last_jump_jiffies)) { + cjiffies -= ctx->last_jump_jiffies; + if ((jiffies_to_msecs(cjiffies) > 1000)) { + DRM_ERROR("atombios stuck in loop for more than 1 second, aborting\n"); + ctx->abort = true; + } + } else { + /* jiffies wrapped around; just wait a little longer */ + ctx->last_jump_jiffies = jiffies; + } + } else { + ctx->last_jump = ctx->start + target; + ctx->last_jump_jiffies = jiffies; + } *ptr = ctx->start + target; + } } static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) @@ -881,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; + uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); + /* op needs the full dst value */ + dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst <<= shift; + dst &= atom_arg_mask[dst_align]; + dst >>= atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } @@ -895,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; + uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); + /* op needs the full dst value */ + dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst >>= shift; + dst &= atom_arg_mask[dst_align]; + dst >>= atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } @@ -1104,15 +1141,16 @@ static struct { atom_op_shr, ATOM_ARG_MC}, { atom_op_debug, 0},}; -static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) +static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int len, ws, ps, ptr; unsigned char op; atom_exec_context ectx; + int ret = 0; if (!base) - return; + return -EINVAL; len = CU16(base + ATOM_CT_SIZE_PTR); ws = CU8(base + ATOM_CT_WS_PTR); @@ -1125,6 +1163,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 ectx.ps_shift = ps / 4; ectx.start = base; ectx.ps = params; + ectx.abort = false; + ectx.last_jump = 0; if (ws) ectx.ws = kzalloc(4 * ws, GFP_KERNEL); else @@ -1137,6 +1177,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); else SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); + if (ectx.abort) { + DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", + base, len, ws, ps, ptr - 1); + ret = -EINVAL; + goto free; + } if (op < ATOM_OP_CNT && op > 0) opcode_table[op].func(&ectx, &ptr, @@ -1150,12 +1196,16 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 debug_depth--; SDEBUG("<<\n"); +free: if (ws) kfree(ectx.ws); + return ret; } -void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) { + int r; + mutex_lock(&ctx->mutex); /* reset reg block */ ctx->reg_block = 0; @@ -1163,8 +1213,9 @@ void atom_execute_table(struct
atom_context *ctx, int index, uint32_t * params) ctx->fb_base = 0; /* reset io mode */ ctx->io_mode = ATOM_IO_MM; - atom_execute_table_locked(ctx, index, params); + r = atom_execute_table_locked(ctx, index, params); mutex_unlock(&ctx->mutex); + return r; } static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; @@ -1248,9 +1299,7 @@ int atom_asic_init(struct atom_context *ctx) if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) return 1; - atom_execute_table(ctx, ATOM_CMD_INIT, ps); - - return 0; + return atom_execute_table(ctx, ATOM_CMD_INIT, ps); } void atom_destroy(struct atom_context *ctx) @@ -1260,12 +1309,16 @@ void atom_destroy(struct atom_context *ctx) kfree(ctx); } -void atom_parse_data_header(struct atom_context *ctx, int index, +bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t * size, uint8_t * frev, uint8_t * crev, uint16_t * data_start) { int offset = index * 2 + 4; int idx = CU16(ctx->data_table + offset); + u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); + + if (!mdt[index]) + return false; if (size) *size = CU16(idx); @@ -1274,38 +1327,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index, if (crev) *crev = CU8(idx + 3); *data_start = idx; - return; + return true; } -void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, +bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, uint8_t * crev) { int offset = index * 2 + 4; int idx = CU16(ctx->cmd_table + offset); + u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); + + if (!mct[index]) + return false; if (frev) *frev = CU8(idx + 2); if (crev) *crev = CU8(idx + 3); - return; + return true; } int atom_allocate_fb_scratch(struct atom_context *ctx) { int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); uint16_t data_offset; - int usage_bytes; + int usage_bytes = 0; struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; - atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); + if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { + firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); - firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); + DRM_DEBUG("atom firmware requested %08x %dkb\n", + firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, + firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); - DRM_DEBUG("atom firmware requested %08x %dkb\n", - firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, - firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); - - usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; + usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; + } if (usage_bytes == 0) usage_bytes = 20 * 1024; /* allocate some scratch memory */ diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index bc73781423a1..cd1b64ab5ca7 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -140,11 +140,13 @@ struct atom_context { extern int atom_debug; struct atom_context *atom_parse(struct card_info *, void *); -void atom_execute_table(struct atom_context *, int, uint32_t *); +int atom_execute_table(struct atom_context *, int, uint32_t *); int atom_asic_init(struct atom_context *); void atom_destroy(struct atom_context *); -void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); 
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); +bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, + uint8_t *frev, uint8_t *crev, uint16_t *data_start); +bool atom_parse_cmd_header(struct atom_context *ctx, int index, + uint8_t *frev, uint8_t *crev); int atom_allocate_fb_scratch(struct atom_context *ctx); #include "atom-types.h" #include "atombios.h" diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 6732b5dd8ff4..1bc72c3190a9 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -2912,7 +2912,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 UCHAR ucTV_BootUpDefaultStandard; UCHAR ucExt_TV_ASIC_ID; UCHAR ucExt_TV_ASIC_SlaveAddr; - ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; + ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2]; }ATOM_ANALOG_TV_INFO_V1_2; typedef struct _ATOM_DPCD_INFO @@ -3780,7 +3780,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT UCHAR ucReserved[2]; }ATOM_ASIC_SS_ASSIGNMENT; -//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type. +//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type. //SS is not required or enabled if a match is not found. #define ASIC_INTERNAL_MEMORY_SS 1 #define ASIC_INTERNAL_ENGINE_SS 2 @@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER #define ATOM_PP_THERMALCONTROLLER_RV6xx 7 #define ATOM_PP_THERMALCONTROLLER_RV770 8 #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 +#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 +#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 +#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller typedef struct _ATOM_PPLIB_STATE { @@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE UCHAR ucClockStateIndices[1]; // variable-sized } ATOM_PPLIB_STATE; +typedef struct _ATOM_PPLIB_FANTABLE +{ + UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same. + UCHAR ucTHyst; // Temperature hysteresis. Integer. + USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. + USHORT usTMed; // The middle temperature where we change slopes. + USHORT usTHigh; // The high point above TMed for adjusting the second slope. + USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments). + USHORT usPWMMed; // The PWM value (in percent) at TMed. + USHORT usPWMHigh; // The PWM value at THigh. +} ATOM_PPLIB_FANTABLE; + +typedef struct _ATOM_PPLIB_EXTENDEDHEADER +{ + USHORT usSize; + ULONG ulMaxEngineClock; // For Overdrive. + ULONG ulMaxMemoryClock; // For Overdrive. + // Add extra system parameters here, always adjust size to include all fields. +} ATOM_PPLIB_EXTENDEDHEADER; + //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 @@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 +#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096 +#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition. 
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). +#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Whether the driver controls VDDCI independently of VDDC. +#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. +#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state. typedef struct _ATOM_PPLIB_POWERPLAYTABLE { @@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE } ATOM_PPLIB_POWERPLAYTABLE; +typedef struct _ATOM_PPLIB_POWERPLAYTABLE2 +{ + ATOM_PPLIB_POWERPLAYTABLE basicTable; + UCHAR ucNumCustomThermalPolicy; + USHORT usCustomThermalPolicyArrayOffset; +}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 +{ + ATOM_PPLIB_POWERPLAYTABLE2 basicTable2; + USHORT usFormatID; // To be used ONLY by PPGen. + USHORT usFanTableOffset; + USHORT usExtendendedHeaderOffset; +} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; + //// ATOM_PPLIB_NONCLOCK_INFO::usClassification #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 @@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 #define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 #define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 -// remaining 3 bits are reserved +#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 +#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 +#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 @@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 +#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 #define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 -#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 +// memory-related flags +#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000 + +// M3 Arb: 2 bits; currently 3 sets of parameters in total +#define ATOM_PPLIB_M3ARB_MASK 0x00060000 +#define ATOM_PPLIB_M3ARB_SHIFT 17 // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. @@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. // referenced from ATOM_PPLIB_STATE::ucClockStateIndices +#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 +#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 + typedef struct _ATOM_PPLIB_R600_CLOCK_INFO { USHORT usEngineClockLow; @@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 +#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0). + +typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO +{ + USHORT usEngineClockLow; + UCHAR ucEngineClockHigh; + + USHORT usMemoryClockLow; + UCHAR ucMemoryClockHigh; + + USHORT usVDDC; + USHORT usVDDCI; + USHORT usUnused; + + ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* + +} ATOM_PPLIB_EVERGREEN_CLOCK_INFO; typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO @@ -5895,7 +5967,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} - UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. + UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement. USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). ULONG ulFlags; } ATOM_PPLIB_RS780_CLOCK_INFO; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index dd9fdf560611..8c2d6478a221 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -26,7 +26,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> -#include "radeon_fixed.h" +#include <drm/drm_fixed.h> #include "radeon.h" #include "atom.h" #include "atom-bits.h" @@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: + radeon_crtc->enabled = true; + /* adjust pm to dpms changes BEFORE enabling crtcs */ + radeon_pm_compute_clocks(rdev); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); - /* XXX re-enable when interrupt support is added */ - if (!ASIC_IS_DCE4(rdev)) - drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - /* XXX re-enable when interrupt support is added */ - if (!ASIC_IS_DCE4(rdev)) - drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); + drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); + radeon_crtc->enabled = false; + /* adjust pm to dpms changes AFTER disabling crtcs */ + radeon_pm_compute_clocks(rdev); break; } } @@ -353,12 +355,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +static void atombios_disable_ss(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + u32 ss_cntl; + + if (ASIC_IS_DCE4(rdev)) { + switch (radeon_crtc->pll_id) { + case ATOM_PPLL1: + ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL); + ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; + WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl); + break; + case ATOM_PPLL2: + ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL); + ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; + WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl); + break; + case ATOM_DCPLL: + case ATOM_PPLL_INVALID: + return; + } + } else if (ASIC_IS_AVIVO(rdev)) { + switch (radeon_crtc->pll_id) { + case ATOM_PPLL1: + ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL); + ss_cntl &= ~1; + WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl); + break; + case ATOM_PPLL2: + ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL); + ss_cntl &= ~1; + WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl); + break; + case ATOM_DCPLL: + case ATOM_PPLL_INVALID: + return; + } + } +} + + union atom_enable_ss { ENABLE_LVDS_SS_PARAMETERS legacy; ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; }; -static void atombios_set_ss(struct drm_crtc 
*crtc, int enable) +static void atombios_enable_ss(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -387,9 +432,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) step = dig->ss->step; delay = dig->ss->delay; range = dig->ss->range; - } else if (enable) + } else return; - } else if (enable) + } else return; break; } @@ -406,13 +451,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) args.v1.ucSpreadSpectrumDelay = delay; args.v1.ucSpreadSpectrumRange = range; args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; - args.v1.ucEnable = enable; + args.v1.ucEnable = ATOM_ENABLE; } else { args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); args.legacy.ucSpreadSpectrumType = type; args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; - args.legacy.ucEnable = enable; + args.legacy.ucEnable = ATOM_ENABLE; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } @@ -453,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) - pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | + pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ RADEON_PLL_PREFER_CLOSEST_LOWER); if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ @@ -478,10 +523,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) adjusted_clock = mode->clock * 2; - /* LVDS PLL quirks */ - if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - pll->algo = dig->pll_algo; + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { + pll->algo = PLL_ALGO_LEGACY; + pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) @@ -503,8 +547,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, int index; index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, - &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, + &crev)) + return adjusted_clock; memset(&args, 0, sizeof(args)); @@ -542,11 +587,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, } } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { /* may want to enable SS on DP/eDP eventually */ - args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE; - if (mode->clock > 165000) + /*args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE;*/ + if (encoder_mode == ATOM_ENCODER_MODE_DP) args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_DUAL_LINK; + DISPPLL_CONFIG_COHERENT_MODE; + else { + if (mode->clock > 165000) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_DUAL_LINK; + } } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -592,8 +642,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) memset(&args, 0, sizeof(args)); index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, - &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, + &crev)) + return; switch (frev) { case 1: @@ -656,6 +707,7 @@ static void atombios_crtc_set_pll(struct 
drm_crtc *crtc, struct drm_display_mode break; case ATOM_DCPLL: case ATOM_PPLL_INVALID: + default: pll = &rdev->clock.dcpll; break; } @@ -667,8 +719,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode &ref_div, &post_div); index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, - &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, + &crev)) + return; switch (frev) { case 1: @@ -1083,15 +1136,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, /* TODO color tiling */ - /* pick pll */ - radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); - - atombios_set_ss(crtc, 0); + atombios_disable_ss(crtc); /* always set DCPLL */ if (ASIC_IS_DCE4(rdev)) atombios_crtc_set_dcpll(crtc); atombios_crtc_set_pll(crtc, adjusted_mode); - atombios_set_ss(crtc, 1); + atombios_enable_ss(crtc); if (ASIC_IS_DCE4(rdev)) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); @@ -1113,6 +1163,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + + /* adjust pm to upcoming mode change */ + radeon_pm_compute_clocks(rdev); + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; return true; @@ -1120,6 +1176,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + + /* pick pll */ + radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); + atombios_lock_crtc(crtc, ATOM_ENABLE); atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 8a133bda00a2..abffb1499e22 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -351,7 +351,7 @@ retry: args.v1.ucChannelID = chan->rec.i2c_id; args.v1.ucDelay = delay / 10; if (ASIC_IS_DCE4(rdev)) - args.v2.ucHPD_ID = chan->rec.hpd_id; + args.v2.ucHPD_ID = chan->rec.hpd; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder, >> DP_TRAIN_PRE_EMPHASIS_SHIFT); /* disable the training pattern on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); + + /* disable the training pattern on the source */ if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); else radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, dig_connector->dp_clock, enc_id, 0); - - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, - dig_connector->dp_clock, enc_id, 0); } int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index bd2e7aa85c1d..1caf625e472b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -23,42 +23,251 @@ */ #include <linux/firmware.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "radeon_drm.h" -#include "rv770d.h" +#include "evergreend.h" #include "atom.h" #include "avivod.h" #include "evergreen_reg.h" +#define EVERGREEN_PFP_UCODE_SIZE 1120 +#define EVERGREEN_PM4_UCODE_SIZE 1376 + static void evergreen_gpu_init(struct radeon_device 
*rdev); void evergreen_fini(struct radeon_device *rdev); +void evergreen_pm_misc(struct radeon_device *rdev) +{ + int req_ps_idx = rdev->pm.requested_power_state_index; + int req_cm_idx = rdev->pm.requested_clock_mode_index; + struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; + struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; + + if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { + if (voltage->voltage != rdev->pm.current_vddc) { + radeon_atom_set_voltage(rdev, voltage->voltage); + rdev->pm.current_vddc = voltage->voltage; + DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + } + } +} + +void evergreen_pm_prepare(struct radeon_device *rdev) +{ + struct drm_device *ddev = rdev->ddev; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + u32 tmp; + + /* disable any active CRTCs */ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); + tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); + } + } +} + +void evergreen_pm_finish(struct radeon_device *rdev) +{ + struct drm_device *ddev = rdev->ddev; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + u32 tmp; + + /* enable any active CRTCs */ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); + tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); + } + } +} + bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) { bool connected = false; - /* XXX */ + + switch (hpd) { + case RADEON_HPD_1: + if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_2: + if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_3: + if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_4: + if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_5: + if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_6: + if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + default: + break; + } + return connected; } void evergreen_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd) { - /* XXX */ + u32 tmp; + bool connected = evergreen_hpd_sense(rdev, hpd); + + switch (hpd) { + case RADEON_HPD_1: + tmp = RREG32(DC_HPD1_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + break; + case RADEON_HPD_2: + tmp = RREG32(DC_HPD2_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + break; + case RADEON_HPD_3: + tmp = RREG32(DC_HPD3_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + break; + case RADEON_HPD_4: + tmp = RREG32(DC_HPD4_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + break; + case RADEON_HPD_5: + tmp = RREG32(DC_HPD5_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= 
DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + break; + case RADEON_HPD_6: + tmp = RREG32(DC_HPD6_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + break; + default: + break; + } } void evergreen_hpd_init(struct radeon_device *rdev) { - /* XXX */ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | + DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HPD1_CONTROL, tmp); + rdev->irq.hpd[0] = true; + break; + case RADEON_HPD_2: + WREG32(DC_HPD2_CONTROL, tmp); + rdev->irq.hpd[1] = true; + break; + case RADEON_HPD_3: + WREG32(DC_HPD3_CONTROL, tmp); + rdev->irq.hpd[2] = true; + break; + case RADEON_HPD_4: + WREG32(DC_HPD4_CONTROL, tmp); + rdev->irq.hpd[3] = true; + break; + case RADEON_HPD_5: + WREG32(DC_HPD5_CONTROL, tmp); + rdev->irq.hpd[4] = true; + break; + case RADEON_HPD_6: + WREG32(DC_HPD6_CONTROL, tmp); + rdev->irq.hpd[5] = true; + break; + default: + break; + } + } + if (rdev->irq.installed) + evergreen_irq_set(rdev); } - -void evergreen_bandwidth_update(struct radeon_device *rdev) +void evergreen_hpd_fini(struct radeon_device *rdev) { - /* XXX */ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HPD1_CONTROL, 0); + rdev->irq.hpd[0] = false; + break; + case RADEON_HPD_2: + WREG32(DC_HPD2_CONTROL, 0); + rdev->irq.hpd[1] = false; + break; + case RADEON_HPD_3: + WREG32(DC_HPD3_CONTROL, 0); + rdev->irq.hpd[2] = false; + break; + case RADEON_HPD_4: + WREG32(DC_HPD4_CONTROL, 0); + rdev->irq.hpd[3] = false; + break; + case RADEON_HPD_5: + WREG32(DC_HPD5_CONTROL, 0); + rdev->irq.hpd[4] = false; + break; + case RADEON_HPD_6: + WREG32(DC_HPD6_CONTROL, 0); + rdev->irq.hpd[5] = false; + break; + default: + break; + } + } } -void evergreen_hpd_fini(struct radeon_device *rdev) +void evergreen_bandwidth_update(struct radeon_device *rdev) { /* XXX */ } @@ -81,10 +290,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) /* * GART */ +void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) +{ + unsigned i; + u32 tmp; + + WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); + for (i = 0; i < rdev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); + tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; + if (tmp == 2) { + printk(KERN_WARNING "[drm] r600 flush TLB failed\n"); + return; + } + if (tmp) { + return; + } + udelay(1); + } +} + int evergreen_pcie_gart_enable(struct radeon_device *rdev) { u32 tmp; - int r, i; + int r; if (rdev->gart.table.vram.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); @@ -119,10 +349,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); - for (i = 1; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + WREG32(VM_CONTEXT1_CNTL, 0); - r600_pcie_gart_tlb_flush(rdev); + evergreen_pcie_gart_tlb_flush(rdev); rdev->gart.ready = 
true; return 0; } @@ -130,11 +359,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) void evergreen_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int i, r; + int r; /* Disable all tables */ - for (i = 0; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + WREG32(VM_CONTEXT0_CNTL, 0); + WREG32(VM_CONTEXT1_CNTL, 0); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | @@ -171,7 +400,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev) void evergreen_agp_enable(struct radeon_device *rdev) { u32 tmp; - int i; /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | @@ -191,8 +419,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); - for (i = 0; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + WREG32(VM_CONTEXT0_CNTL, 0); + WREG32(VM_CONTEXT1_CNTL, 0); } static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) @@ -379,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev) WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); if (rdev->flags & RADEON_IS_AGP) { WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); @@ -398,45 +626,683 @@ static void evergreen_mc_program(struct radeon_device *rdev) rv515_vga_render_disable(rdev); } -#if 0 /* * CP. */ -static void evergreen_cp_stop(struct radeon_device *rdev) -{ - /* XXX */ -} - static int evergreen_cp_load_microcode(struct radeon_device *rdev) { - /* XXX */ + const __be32 *fw_data; + int i; + if (!rdev->me_fw || !rdev->pfp_fw) + return -EINVAL; + + r700_cp_stop(rdev); + WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); + + fw_data = (const __be32 *)rdev->pfp_fw->data; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++) + WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + fw_data = (const __be32 *)rdev->me_fw->data; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++) + WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); + + WREG32(CP_PFP_UCODE_ADDR, 0); + WREG32(CP_ME_RAM_WADDR, 0); + WREG32(CP_ME_RAM_RADDR, 0); return 0; } +int evergreen_cp_resume(struct radeon_device *rdev) +{ + u32 tmp; + u32 rb_bufsz; + int r; + + /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ + WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | + SOFT_RESET_PA | + SOFT_RESET_SH | + SOFT_RESET_VGT | + SOFT_RESET_SX)); + RREG32(GRBM_SOFT_RESET); + mdelay(15); + WREG32(GRBM_SOFT_RESET, 0); + RREG32(GRBM_SOFT_RESET); + + /* Set ring buffer size */ + rb_bufsz = drm_order(rdev->cp.ring_size / 8); + tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; +#ifdef __BIG_ENDIAN + tmp |= BUF_SWAP_32BIT; +#endif + WREG32(CP_RB_CNTL, tmp); + WREG32(CP_SEM_WAIT_TIMER, 0x4); + + /* Set the write pointer delay */ + WREG32(CP_RB_WPTR_DELAY, 0); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); + WREG32(CP_RB_RPTR_WR, 0); + WREG32(CP_RB_WPTR, 0); + WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); + WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); + mdelay(1); + WREG32(CP_RB_CNTL, tmp); + + WREG32(CP_RB_BASE, 
rdev->cp.gpu_addr >> 8); + WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); + + rdev->cp.rptr = RREG32(CP_RB_RPTR); + rdev->cp.wptr = RREG32(CP_RB_WPTR); + + r600_cp_start(rdev); + rdev->cp.ready = true; + r = radeon_ring_test(rdev); + if (r) { + rdev->cp.ready = false; + return r; + } + return 0; +} /* * Core functions */ -static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, +static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, + u32 num_tile_pipes, u32 num_backends, u32 backend_disable_mask) { u32 backend_map = 0; + u32 enabled_backends_mask = 0; + u32 enabled_backends_count = 0; + u32 cur_pipe; + u32 swizzle_pipe[EVERGREEN_MAX_PIPES]; + u32 cur_backend = 0; + u32 i; + bool force_no_swizzle; + + if (num_tile_pipes > EVERGREEN_MAX_PIPES) + num_tile_pipes = EVERGREEN_MAX_PIPES; + if (num_tile_pipes < 1) + num_tile_pipes = 1; + if (num_backends > EVERGREEN_MAX_BACKENDS) + num_backends = EVERGREEN_MAX_BACKENDS; + if (num_backends < 1) + num_backends = 1; + + for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { + if (((backend_disable_mask >> i) & 1) == 0) { + enabled_backends_mask |= (1 << i); + ++enabled_backends_count; + } + if (enabled_backends_count == num_backends) + break; + } + + if (enabled_backends_count == 0) { + enabled_backends_mask = 1; + enabled_backends_count = 1; + } + + if (enabled_backends_count != num_backends) + num_backends = enabled_backends_count; + + memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES); + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_REDWOOD: + force_no_swizzle = false; + break; + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + case CHIP_JUNIPER: + default: + force_no_swizzle = true; + break; + } + if (force_no_swizzle) { + bool last_backend_enabled = false; + + force_no_swizzle = false; + for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { + if (((enabled_backends_mask >> i) & 1) == 1) { + if (last_backend_enabled) + force_no_swizzle = true; + last_backend_enabled = true; + } else + last_backend_enabled = false; + } + } + + switch (num_tile_pipes) { + case 1: + case 3: + case 5: + case 7: + DRM_ERROR("odd number of pipes!\n"); + break; + case 2: + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + break; + case 4: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 1; + swizzle_pipe[3] = 3; + } + break; + case 6: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 1; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 5; + } + break; + case 8: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + swizzle_pipe[6] = 6; + swizzle_pipe[7] = 7; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 6; + swizzle_pipe[4] = 1; + swizzle_pipe[5] = 3; + swizzle_pipe[6] = 5; + swizzle_pipe[7] = 7; + } + break; + } + + for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { + while (((1 << cur_backend) & enabled_backends_mask) == 0) + cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; + + backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); + + cur_backend = (cur_backend + 1) % 
EVERGREEN_MAX_BACKENDS; + } return backend_map; } -#endif static void evergreen_gpu_init(struct radeon_device *rdev) { - /* XXX */ + u32 cc_rb_backend_disable = 0; + u32 cc_gc_shader_pipe_config; + u32 gb_addr_config = 0; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 gb_backend_map; + u32 grbm_gfx_index; + u32 sx_debug_1; + u32 smx_dc_ctl0; + u32 sq_config; + u32 sq_lds_resource_mgmt; + u32 sq_gpr_resource_mgmt_1; + u32 sq_gpr_resource_mgmt_2; + u32 sq_gpr_resource_mgmt_3; + u32 sq_thread_resource_mgmt; + u32 sq_thread_resource_mgmt_2; + u32 sq_stack_resource_mgmt_1; + u32 sq_stack_resource_mgmt_2; + u32 sq_stack_resource_mgmt_3; + u32 vgt_cache_invalidation; + u32 hdp_host_path_cntl; + int i, j, num_shader_engines, ps_thread_count; + + switch (rdev->family) { + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + rdev->config.evergreen.num_ses = 2; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 8; + rdev->config.evergreen.max_simds = 10; + rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 512; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_JUNIPER: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 4; + rdev->config.evergreen.max_simds = 10; + rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 512; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_REDWOOD: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 4; + rdev->config.evergreen.max_simds = 5; + rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case 
CHIP_CEDAR: + default: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 2; + rdev->config.evergreen.max_tile_pipes = 2; + rdev->config.evergreen.max_simds = 2; + rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 192; + rdev->config.evergreen.max_gs_threads = 16; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 128; + rdev->config.evergreen.sx_max_export_pos_size = 32; + rdev->config.evergreen.sx_max_export_smx_size = 96; + rdev->config.evergreen.max_hw_contexts = 4; + rdev->config.evergreen.sq_num_cf_insts = 1; + + rdev->config.evergreen.sc_prim_fifo_size = 0x40; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + } + + /* Initialize HDP */ + for (i = 0, j = 0; i < 32; i++, j += 0x18) { + WREG32((0x2c14 + j), 0x00000000); + WREG32((0x2c18 + j), 0x00000000); + WREG32((0x2c1c + j), 0x00000000); + WREG32((0x2c20 + j), 0x00000000); + WREG32((0x2c24 + j), 0x00000000); + } + + WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; + + cc_gc_shader_pipe_config |= + INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) + & EVERGREEN_MAX_PIPES_MASK); + cc_gc_shader_pipe_config |= + INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) + & EVERGREEN_MAX_SIMDS_MASK); + + cc_rb_backend_disable = + BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) + & EVERGREEN_MAX_BACKENDS_MASK); + + + mc_shared_chmap = RREG32(MC_SHARED_CHMAP); + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + + switch (rdev->config.evergreen.max_tile_pipes) { + case 1: + default: + gb_addr_config |= NUM_PIPES(0); + break; + case 2: + gb_addr_config |= NUM_PIPES(1); + break; + case 4: + gb_addr_config |= NUM_PIPES(2); + break; + case 8: + gb_addr_config |= NUM_PIPES(3); + break; + } + + gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); + gb_addr_config |= BANK_INTERLEAVE_SIZE(0); + gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); + gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); + gb_addr_config |= NUM_GPUS(0); /* Hemlock? 
*/ + gb_addr_config |= MULTI_GPU_TILE_SIZE(2); + + if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) + gb_addr_config |= ROW_SIZE(2); + else + gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); + + if (rdev->ddev->pdev->device == 0x689e) { + u32 efuse_straps_4; + u32 efuse_straps_3; + u8 efuse_box_bit_131_124; + + WREG32(RCU_IND_INDEX, 0x204); + efuse_straps_4 = RREG32(RCU_IND_DATA); + WREG32(RCU_IND_INDEX, 0x203); + efuse_straps_3 = RREG32(RCU_IND_DATA); + efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); + + switch(efuse_box_bit_131_124) { + case 0x00: + gb_backend_map = 0x76543210; + break; + case 0x55: + gb_backend_map = 0x77553311; + break; + case 0x56: + gb_backend_map = 0x77553300; + break; + case 0x59: + gb_backend_map = 0x77552211; + break; + case 0x66: + gb_backend_map = 0x77443300; + break; + case 0x99: + gb_backend_map = 0x66552211; + break; + case 0x5a: + gb_backend_map = 0x77552200; + break; + case 0xaa: + gb_backend_map = 0x66442200; + break; + case 0x95: + gb_backend_map = 0x66553311; + break; + default: + DRM_ERROR("bad backend map, using default\n"); + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + break; + } + } else if (rdev->ddev->pdev->device == 0x68b9) { + u32 efuse_straps_3; + u8 efuse_box_bit_127_124; + + WREG32(RCU_IND_INDEX, 0x203); + efuse_straps_3 = RREG32(RCU_IND_DATA); + efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; + + switch(efuse_box_bit_127_124) { + case 0x0: + gb_backend_map = 0x00003210; + break; + case 0x5: + case 0x6: + case 0x9: + case 0xa: + gb_backend_map = 0x00003311; + break; + default: + DRM_ERROR("bad backend map, using default\n"); + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + break; + } + } else + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + + WREG32(GB_BACKEND_MAP, gb_backend_map); + WREG32(GB_ADDR_CONFIG, gb_addr_config); + WREG32(DMIF_ADDR_CONFIG, gb_addr_config); + WREG32(HDP_ADDR_CONFIG, gb_addr_config); + + num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; + grbm_gfx_index = INSTANCE_BROADCAST_WRITES; + + for (i = 0; i < rdev->config.evergreen.num_ses; i++) { + u32 rb = cc_rb_backend_disable | (0xf0 << 16); + u32 sp = cc_gc_shader_pipe_config; + u32 gfx = grbm_gfx_index | SE_INDEX(i); + + if (i == num_shader_engines) { + rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); + sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); + } + + WREG32(GRBM_GFX_INDEX, gfx); + WREG32(RLC_GFX_INDEX, gfx); + + WREG32(CC_RB_BACKEND_DISABLE, rb); + WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); + WREG32(GC_USER_RB_BACKEND_DISABLE, rb); + WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); + } + + grbm_gfx_index |= SE_BROADCAST_WRITES; + WREG32(GRBM_GFX_INDEX, grbm_gfx_index); + WREG32(RLC_GFX_INDEX, grbm_gfx_index); + + WREG32(CGTS_SYS_TCC_DISABLE, 0); + WREG32(CGTS_TCC_DISABLE, 0); + WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); + 
WREG32(CGTS_USER_TCC_DISABLE, 0); + + /* set HW defaults for 3D engine */ + WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | + ROQ_IB2_START(0x2b))); + + WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); + + WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | + SYNC_GRADIENT | + SYNC_WALKER | + SYNC_ALIGNER)); + + sx_debug_1 = RREG32(SX_DEBUG_1); + sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; + WREG32(SX_DEBUG_1, sx_debug_1); + + + smx_dc_ctl0 = RREG32(SMX_DC_CTL0); + smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); + WREG32(SMX_DC_CTL0, smx_dc_ctl0); + + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | + POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | + SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); + + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | + SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); + + WREG32(VGT_NUM_INSTANCES, 1); + WREG32(SPI_CONFIG_CNTL, 0); + WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); + WREG32(CP_PERFMON_CNTL, 0); + + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | + FETCH_FIFO_HIWATER(0x4) | + DONE_FIFO_HIWATER(0xe0) | + ALU_UPDATE_FIFO_HIWATER(0x8))); + + sq_config = RREG32(SQ_CONFIG); + sq_config &= ~(PS_PRIO(3) | + VS_PRIO(3) | + GS_PRIO(3) | + ES_PRIO(3)); + sq_config |= (VC_ENABLE | + EXPORT_SRC_C | + PS_PRIO(0) | + VS_PRIO(1) | + GS_PRIO(2) | + ES_PRIO(3)); + + if (rdev->family == CHIP_CEDAR) + /* no vertex cache */ + sq_config &= ~VC_ENABLE; + + sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); + + sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); + sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); + sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); + sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); + sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); + sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); + sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); + + if (rdev->family == CHIP_CEDAR) + ps_thread_count = 96; + else + ps_thread_count = 128; + + sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); + sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); + + sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_2 |= 
NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + + WREG32(SQ_CONFIG, sq_config); + WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); + WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); + WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); + WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); + WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); + WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); + WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); + WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); + WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); + WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); + + WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | + FORCE_EOV_MAX_REZ_CNT(255))); + + if (rdev->family == CHIP_CEDAR) + vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); + else + vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); + vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); + WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); + + WREG32(VGT_GS_VERTEX_REUSE, 16); + WREG32(PA_SC_LINE_STIPPLE_STATE, 0); + + WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); + WREG32(VGT_OUT_DEALLOC_CNTL, 16); + + WREG32(CB_PERF_CTR0_SEL_0, 0); + WREG32(CB_PERF_CTR0_SEL_1, 0); + WREG32(CB_PERF_CTR1_SEL_0, 0); + WREG32(CB_PERF_CTR1_SEL_1, 0); + WREG32(CB_PERF_CTR2_SEL_0, 0); + WREG32(CB_PERF_CTR2_SEL_1, 0); + WREG32(CB_PERF_CTR3_SEL_0, 0); + WREG32(CB_PERF_CTR3_SEL_1, 0); + + /* clear render buffer base addresses */ + WREG32(CB_COLOR0_BASE, 0); + WREG32(CB_COLOR1_BASE, 0); + WREG32(CB_COLOR2_BASE, 0); + WREG32(CB_COLOR3_BASE, 0); + WREG32(CB_COLOR4_BASE, 0); + WREG32(CB_COLOR5_BASE, 0); + WREG32(CB_COLOR6_BASE, 0); + WREG32(CB_COLOR7_BASE, 0); + WREG32(CB_COLOR8_BASE, 0); + WREG32(CB_COLOR9_BASE, 0); + WREG32(CB_COLOR10_BASE, 0); + WREG32(CB_COLOR11_BASE, 0); + + /* set the shader const cache sizes to 0 */ + for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) + WREG32(i, 0); + for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) + WREG32(i, 0); + + hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); + WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); + + WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); + + udelay(50); + } int evergreen_mc_init(struct radeon_device *rdev) { - fixed20_12 a; u32 tmp; int chansize, numchan; @@ -475,30 +1341,627 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.visible_vram_size = rdev->mc.aper_size; - /* FIXME remove this once we support unmappable VRAM */ - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { - rdev->mc.mc_vram_size = rdev->mc.aper_size; - rdev->mc.real_vram_size = rdev->mc.aper_size; - } r600_vram_gtt_location(rdev, &rdev->mc); - /* FIXME: we should enforce default clock in case GPU is not in - * default setup - */ - a.full = rfixed_const(100); - rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); - rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); + radeon_update_bandwidth_info(rdev); + return 0; } -int evergreen_gpu_reset(struct radeon_device *rdev) +bool evergreen_gpu_is_lockup(struct radeon_device *rdev) { /* FIXME: implement for evergreen */ + return false; +} + +static int 
evergreen_gpu_soft_reset(struct radeon_device *rdev) +{ + struct evergreen_mc_save save; + u32 srbm_reset = 0; + u32 grbm_reset = 0; + + dev_info(rdev->dev, "GPU softreset\n"); + dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(GRBM_STATUS)); + dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", + RREG32(GRBM_STATUS_SE0)); + dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(GRBM_STATUS_SE1)); + dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(SRBM_STATUS)); + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); + } + /* Disable CP parsing/prefetching */ + WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); + + /* reset all the gfx blocks */ + grbm_reset = (SOFT_RESET_CP | + SOFT_RESET_CB | + SOFT_RESET_DB | + SOFT_RESET_PA | + SOFT_RESET_SC | + SOFT_RESET_SPI | + SOFT_RESET_SH | + SOFT_RESET_SX | + SOFT_RESET_TC | + SOFT_RESET_TA | + SOFT_RESET_VC | + SOFT_RESET_VGT); + + dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); + WREG32(GRBM_SOFT_RESET, grbm_reset); + (void)RREG32(GRBM_SOFT_RESET); + udelay(50); + WREG32(GRBM_SOFT_RESET, 0); + (void)RREG32(GRBM_SOFT_RESET); + + /* reset all the system blocks */ + srbm_reset = SRBM_SOFT_RESET_ALL_MASK; + + dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); + WREG32(SRBM_SOFT_RESET, srbm_reset); + (void)RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + (void)RREG32(SRBM_SOFT_RESET); + /* Wait a little for things to settle down */ + udelay(50); + dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(GRBM_STATUS)); + dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", + RREG32(GRBM_STATUS_SE0)); + dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(GRBM_STATUS_SE1)); + dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(SRBM_STATUS)); + /* After reset we need to reinit the asic as GPU often end up in an + * incoherent state.
+ */ + atom_asic_init(rdev->mode_info.atom_context); + evergreen_mc_resume(rdev, &save); return 0; } +int evergreen_asic_reset(struct radeon_device *rdev) +{ + return evergreen_gpu_soft_reset(rdev); +} + +/* Interrupts */ + +u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) +{ + switch (crtc) { + case 0: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET); + case 1: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET); + case 2: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET); + case 3: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET); + case 4: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET); + case 5: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET); + default: + return 0; + } +} + +void evergreen_disable_interrupt_state(struct radeon_device *rdev) +{ + u32 tmp; + + WREG32(CP_INT_CNTL, 0); + WREG32(GRBM_INT_CNTL, 0); + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + + WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + WREG32(DACB_AUTODETECT_INT_CONTROL, 0); + + tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + +} + +int evergreen_irq_set(struct radeon_device *rdev) +{ + u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; + u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; + u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; + u32 grbm_int_cntl = 0; + + if (!rdev->irq.installed) { + WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + return -EINVAL; + } + /* don't enable anything if the ih is disabled */ + if (!rdev->ih.enabled) { + r600_disable_interrupts(rdev); + /* force the active interrupt state to all disabled */ + evergreen_disable_interrupt_state(rdev); + return 0; + } + + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + + if (rdev->irq.sw_int) { + DRM_DEBUG("evergreen_irq_set: sw int\n"); + cp_int_cntl |= RB_INT_ENABLE; + } + if (rdev->irq.crtc_vblank_int[0]) { + DRM_DEBUG("evergreen_irq_set: vblank 0\n"); + 
crtc1 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[1]) { + DRM_DEBUG("evergreen_irq_set: vblank 1\n"); + crtc2 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[2]) { + DRM_DEBUG("evergreen_irq_set: vblank 2\n"); + crtc3 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[3]) { + DRM_DEBUG("evergreen_irq_set: vblank 3\n"); + crtc4 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[4]) { + DRM_DEBUG("evergreen_irq_set: vblank 4\n"); + crtc5 |= VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[5]) { + DRM_DEBUG("evergreen_irq_set: vblank 5\n"); + crtc6 |= VBLANK_INT_MASK; + } + if (rdev->irq.hpd[0]) { + DRM_DEBUG("evergreen_irq_set: hpd 1\n"); + hpd1 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[1]) { + DRM_DEBUG("evergreen_irq_set: hpd 2\n"); + hpd2 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[2]) { + DRM_DEBUG("evergreen_irq_set: hpd 3\n"); + hpd3 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[3]) { + DRM_DEBUG("evergreen_irq_set: hpd 4\n"); + hpd4 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[4]) { + DRM_DEBUG("evergreen_irq_set: hpd 5\n"); + hpd5 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[5]) { + DRM_DEBUG("evergreen_irq_set: hpd 6\n"); + hpd6 |= DC_HPDx_INT_EN; + } + if (rdev->irq.gui_idle) { + DRM_DEBUG("gui idle\n"); + grbm_int_cntl |= GUI_IDLE_INT_ENABLE; + } + + WREG32(CP_INT_CNTL, cp_int_cntl); + WREG32(GRBM_INT_CNTL, grbm_int_cntl); + + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); + WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); + + WREG32(DC_HPD1_INT_CONTROL, hpd1); + WREG32(DC_HPD2_INT_CONTROL, hpd2); + WREG32(DC_HPD3_INT_CONTROL, hpd3); + WREG32(DC_HPD4_INT_CONTROL, hpd4); + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + + return 0; +} + +static inline void evergreen_irq_ack(struct radeon_device *rdev, + u32 *disp_int, + u32 *disp_int_cont, + u32 *disp_int_cont2, + u32 *disp_int_cont3, + u32 *disp_int_cont4, + u32 *disp_int_cont5) +{ + u32 tmp; + + *disp_int = RREG32(DISP_INTERRUPT_STATUS); + *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); + *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); + *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); + *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); + *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); + + if (*disp_int & LB_D1_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int & LB_D1_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) + 
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) + WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); + if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) + WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); + + if (*disp_int & DC_HPD1_INTERRUPT) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD1_INT_CONTROL, tmp); + } + if (*disp_int_cont & DC_HPD2_INTERRUPT) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } + if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } + if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { + tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +} + +void evergreen_irq_disable(struct radeon_device *rdev) +{ + u32 disp_int, disp_int_cont, disp_int_cont2; + u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; + + r600_disable_interrupts(rdev); + /* Wait and acknowledge irq */ + mdelay(1); + evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, + &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + evergreen_disable_interrupt_state(rdev); +} + +static void evergreen_irq_suspend(struct radeon_device *rdev) +{ + evergreen_irq_disable(rdev); + r600_rlc_stop(rdev); +} + +static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) +{ + u32 wptr, tmp; + + /* XXX use writeback */ + wptr = RREG32(IH_RB_WPTR); + + if (wptr & RB_OVERFLOW) { + /* When a ring buffer overflow happens, start parsing interrupts + * from the last not-overwritten vector (wptr + 16). Hopefully + * this should allow us to catch up. + */ + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); + rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; + tmp = RREG32(IH_RB_CNTL); + tmp |= IH_WPTR_OVERFLOW_CLEAR; + WREG32(IH_RB_CNTL, tmp); + } + return (wptr & rdev->ih.ptr_mask); +} + +int evergreen_irq_process(struct radeon_device *rdev) +{ + u32 wptr = evergreen_get_ih_wptr(rdev); + u32 rptr = rdev->ih.rptr; + u32 src_id, src_data; + u32 ring_index; + u32 disp_int, disp_int_cont, disp_int_cont2; + u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; + unsigned long flags; + bool queue_hotplug = false; + + DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr); + if (!rdev->ih.enabled) + return IRQ_NONE; + + spin_lock_irqsave(&rdev->ih.lock, flags); + + if (rptr == wptr) { + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_NONE; + } + if (rdev->shutdown) { + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_NONE; + } + +restart_ih: + /* display interrupts */ + evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, + &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + + rdev->ih.wptr = wptr; + while (rptr != wptr) { + /* wptr/rptr are in bytes!
*/ + ring_index = rptr / 4; + src_id = rdev->ih.ring[ring_index] & 0xff; + src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; + + switch (src_id) { + case 1: /* D1 vblank/vline */ + switch (src_data) { + case 0: /* D1 vblank */ + if (disp_int & LB_D1_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 0); + wake_up(&rdev->irq.vblank_queue); + disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + } + break; + case 1: /* D1 vline */ + if (disp_int & LB_D1_VLINE_INTERRUPT) { + disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 2: /* D2 vblank/vline */ + switch (src_data) { + case 0: /* D2 vblank */ + if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 1); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + } + break; + case 1: /* D2 vline */ + if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { + disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 3: /* D3 vblank/vline */ + switch (src_data) { + case 0: /* D3 vblank */ + if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 2); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D3 vblank\n"); + } + break; + case 1: /* D3 vline */ + if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { + disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + DRM_DEBUG("IH: D3 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 4: /* D4 vblank/vline */ + switch (src_data) { + case 0: /* D4 vblank */ + if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 3); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D4 vblank\n"); + } + break; + case 1: /* D4 vline */ + if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { + disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + DRM_DEBUG("IH: D4 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 5: /* D5 vblank/vline */ + switch (src_data) { + case 0: /* D5 vblank */ + if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 4); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D5 vblank\n"); + } + break; + case 1: /* D5 vline */ + if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { + disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + DRM_DEBUG("IH: D5 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 6: /* D6 vblank/vline */ + switch (src_data) { + case 0: /* D6 vblank */ + if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 5); + wake_up(&rdev->irq.vblank_queue); + disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D6 vblank\n"); + } + break; + case 1: /* D6 vline */ + if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { + disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + DRM_DEBUG("IH: D6 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 42: /* HPD hotplug */ + switch (src_data) { + case 0: + if (disp_int & DC_HPD1_INTERRUPT) { + disp_int &= ~DC_HPD1_INTERRUPT; + 
queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); + } + break; + case 1: + if (disp_int_cont & DC_HPD2_INTERRUPT) { + disp_int_cont &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); + } + break; + case 2: + if (disp_int_cont2 & DC_HPD3_INTERRUPT) { + disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); + } + break; + case 3: + if (disp_int_cont3 & DC_HPD4_INTERRUPT) { + disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); + } + break; + case 4: + if (disp_int_cont4 & DC_HPD5_INTERRUPT) { + disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); + } + break; + case 5: + if (disp_int_cont5 & DC_HPD6_INTERRUPT) { + disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 176: /* CP_INT in ring buffer */ + case 177: /* CP_INT in IB1 */ + case 178: /* CP_INT in IB2 */ + DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); + radeon_fence_process(rdev); + break; + case 181: /* CP EOP event */ + DRM_DEBUG("IH: CP EOP\n"); + break; + case 233: /* GUI IDLE */ + DRM_DEBUG("IH: GUI idle\n"); + rdev->pm.gui_idle = true; + wake_up(&rdev->irq.idle_queue); + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + + /* wptr/rptr are in bytes! */ + rptr += 16; + rptr &= rdev->ih.ptr_mask; + } + /* make sure wptr hasn't changed while processing */ + wptr = evergreen_get_ih_wptr(rdev); + if (wptr != rdev->ih.wptr) + goto restart_ih; + if (queue_hotplug) + queue_work(rdev->wq, &rdev->hotplug_work); + rdev->ih.rptr = rptr; + WREG32(IH_RB_RPTR, rdev->ih.rptr); + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_HANDLED; +} + static int evergreen_startup(struct radeon_device *rdev) { -#if 0 int r; if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { @@ -508,17 +1971,15 @@ static int evergreen_startup(struct radeon_device *rdev) return r; } } -#endif + evergreen_mc_program(rdev); -#if 0 if (rdev->flags & RADEON_IS_AGP) { - evergreem_agp_enable(rdev); + evergreen_agp_enable(rdev); } else { r = evergreen_pcie_gart_enable(rdev); if (r) return r; } -#endif evergreen_gpu_init(rdev); #if 0 if (!rdev->r600_blit.shader_obj) { @@ -539,6 +2000,7 @@ static int evergreen_startup(struct radeon_device *rdev) DRM_ERROR("failed to pin blit object %d\n", r); return r; } +#endif /* Enable IRQ */ r = r600_irq_init(rdev); @@ -547,7 +2009,7 @@ static int evergreen_startup(struct radeon_device *rdev) radeon_irq_kms_fini(rdev); return r; } - r600_irq_set(rdev); + evergreen_irq_set(rdev); r = radeon_ring_init(rdev, rdev->cp.ring_size); if (r) @@ -555,12 +2017,12 @@ static int evergreen_startup(struct radeon_device *rdev) r = evergreen_cp_load_microcode(rdev); if (r) return r; - r = r600_cp_resume(rdev); + r = evergreen_cp_resume(rdev); if (r) return r; /* write back buffers are not vital so don't worry about failure */ r600_wb_enable(rdev); -#endif + return 0; } @@ -585,13 +2047,13 @@ int evergreen_resume(struct radeon_device *rdev) DRM_ERROR("r600 startup failed on resume\n"); return r; } -#if 0 + r = r600_ib_test(rdev); if (r) { DRM_ERROR("radeon: failed testing IB (%d).\n", r); return r; } -#endif + return r; } @@ -600,12 +2062,14 @@ int evergreen_suspend(struct radeon_device *rdev) { #if 0 int r; - +#endif /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; +
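Two mechanics of the IH ring shown above, restated in isolation. Each entry consumed by evergreen_irq_process() is a 16-byte, four-dword vector of which only the first two dwords are decoded; and the overflow path in evergreen_get_ih_wptr() resumes at the first vector not yet overwritten, e.g. with a 64 KiB ring (ptr_mask 0xFFFF) a wptr of 0xFFF8 with RB_OVERFLOW set resumes at (0xFFF8 + 16) & 0xFFFF = 0x0008. Both helpers below are illustrative, not symbols from this patch:

struct ih_vector {
	u32 src_id;	/* ring[i] & 0xff: 1-6 = D1-D6 vblank/vline, 42 = HPD, 176-178 = CP */
	u32 src_data;	/* ring[i+1] & 0xfffffff: sub-source, e.g. 0 = vblank, 1 = vline */
};

static void ih_decode(const u32 *ring, u32 rptr, struct ih_vector *v)
{
	u32 i = rptr / 4;	/* rptr counts bytes, ring[] holds dwords */

	v->src_id = ring[i] & 0xff;
	v->src_data = ring[i + 1] & 0xfffffff;
}

static u32 ih_overflow_rptr(u32 wptr, u32 ptr_mask)
{
	/* skip the vector wptr points into, wrapping on the ring mask */
	return (wptr + 16) & ptr_mask;
}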
evergreen_irq_suspend(rdev); r600_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); +#if 0 /* unpin shaders bo */ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (likely(r == 0)) { @@ -685,8 +2149,6 @@ int evergreen_init(struct radeon_device *rdev) r = radeon_clocks_init(rdev); if (r) return r; - /* Initialize power management */ - radeon_pm_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) @@ -705,7 +2167,7 @@ int evergreen_init(struct radeon_device *rdev) r = radeon_bo_init(rdev); if (r) return r; -#if 0 + r = radeon_irq_kms_init(rdev); if (r) return r; @@ -719,14 +2181,16 @@ int evergreen_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; -#endif - rdev->accel_working = false; + + rdev->accel_working = true; r = evergreen_startup(rdev); if (r) { - evergreen_suspend(rdev); - /*r600_wb_fini(rdev);*/ - /*radeon_ring_fini(rdev);*/ - /*evergreen_pcie_gart_fini(rdev);*/ + dev_err(rdev->dev, "disabling GPU acceleration\n"); + r700_cp_fini(rdev); + r600_wb_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); + evergreen_pcie_gart_fini(rdev); rdev->accel_working = false; } if (rdev->accel_working) { @@ -746,15 +2210,12 @@ int evergreen_init(struct radeon_device *rdev) void evergreen_fini(struct radeon_device *rdev) { - evergreen_suspend(rdev); -#if 0 - r600_blit_fini(rdev); + /*r600_blit_fini(rdev);*/ + r700_cp_fini(rdev); + r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_ring_fini(rdev); - r600_wb_fini(rdev); evergreen_pcie_gart_fini(rdev); -#endif radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c new file mode 100644 index 000000000000..345a75a03c96 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -0,0 +1,1354 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#include "drmP.h" +#include "radeon.h" +#include "evergreend.h" +#include "evergreen_reg_safe.h" + +static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, + struct radeon_cs_reloc **cs_reloc); + +struct evergreen_cs_track { + u32 group_size; + u32 nbanks; + u32 npipes; + /* values we track */ + u32 nsamples; + u32 cb_color_base_last[12]; + struct radeon_bo *cb_color_bo[12]; + u32 cb_color_bo_offset[12]; + struct radeon_bo *cb_color_fmask_bo[8]; + struct radeon_bo *cb_color_cmask_bo[8]; + u32 cb_color_info[12]; + u32 cb_color_view[12]; + u32 cb_color_pitch_idx[12]; + u32 cb_color_slice_idx[12]; + u32 cb_color_dim_idx[12]; + u32 cb_color_dim[12]; + u32 cb_color_pitch[12]; + u32 cb_color_slice[12]; + u32 cb_color_cmask_slice[8]; + u32 cb_color_fmask_slice[8]; + u32 cb_target_mask; + u32 cb_shader_mask; + u32 vgt_strmout_config; + u32 vgt_strmout_buffer_config; + u32 db_depth_control; + u32 db_depth_view; + u32 db_depth_size; + u32 db_depth_size_idx; + u32 db_z_info; + u32 db_z_idx; + u32 db_z_read_offset; + u32 db_z_write_offset; + struct radeon_bo *db_z_read_bo; + struct radeon_bo *db_z_write_bo; + u32 db_s_info; + u32 db_s_idx; + u32 db_s_read_offset; + u32 db_s_write_offset; + struct radeon_bo *db_s_read_bo; + struct radeon_bo *db_s_write_bo; +}; + +static void evergreen_cs_track_init(struct evergreen_cs_track *track) +{ + int i; + + for (i = 0; i < 8; i++) { + track->cb_color_fmask_bo[i] = NULL; + track->cb_color_cmask_bo[i] = NULL; + track->cb_color_cmask_slice[i] = 0; + track->cb_color_fmask_slice[i] = 0; + } + + for (i = 0; i < 12; i++) { + track->cb_color_base_last[i] = 0; + track->cb_color_bo[i] = NULL; + track->cb_color_bo_offset[i] = 0xFFFFFFFF; + track->cb_color_info[i] = 0; + track->cb_color_view[i] = 0; + track->cb_color_pitch_idx[i] = 0; + track->cb_color_slice_idx[i] = 0; + track->cb_color_dim_idx[i] = 0; + track->cb_color_dim[i] = 0; + track->cb_color_pitch[i] = 0; + track->cb_color_slice[i] = 0; + } + track->cb_target_mask = 0xFFFFFFFF; + track->cb_shader_mask = 0xFFFFFFFF; + + track->db_depth_view = 0xFFFFC000; + track->db_depth_size = 0xFFFFFFFF; + track->db_depth_size_idx = 0; + track->db_depth_control = 0xFFFFFFFF; + track->db_z_info = 0xFFFFFFFF; + track->db_z_idx = 0xFFFFFFFF; + track->db_z_read_offset = 0xFFFFFFFF; + track->db_z_write_offset = 0xFFFFFFFF; + track->db_z_read_bo = NULL; + track->db_z_write_bo = NULL; + track->db_s_info = 0xFFFFFFFF; + track->db_s_idx = 0xFFFFFFFF; + track->db_s_read_offset = 0xFFFFFFFF; + track->db_s_write_offset = 0xFFFFFFFF; + track->db_s_read_bo = NULL; + track->db_s_write_bo = NULL; +} + +static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i) +{ + /* XXX fill in */ + return 0; +} + +static int evergreen_cs_track_check(struct radeon_cs_parser *p) +{ + struct evergreen_cs_track *track = p->track; + + /* we don't support stream out buffers yet */ + if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) { + dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); + return -EINVAL; + } + + /* XXX fill in */ + return 0; +}
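evergreen_cs_track_init() above seeds most fields with all-ones sentinels so later validation can tell "never programmed by this command stream" apart from a legitimate zero. An illustrative helper built on that convention (not part of the patch):

static bool evergreen_cs_cb_slot_programmed(const struct evergreen_cs_track *track, int i)
{
	/* 0xFFFFFFFF is the init-time sentinel for cb_color_bo_offset[] */
	return track->cb_color_bo_offset[i] != 0xFFFFFFFF;
}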
+/** + * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet + * @parser: parser structure holding parsing context. + * @pkt: where to store packet information + * + * Assumes that chunk_ib_index is properly set. Will return -EINVAL + * if the packet is bigger than the remaining ib size, or if the packet + * is unknown. + **/ +int evergreen_cs_packet_parse(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx) +{ + struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; + uint32_t header; + + if (idx >= ib_chunk->length_dw) { + DRM_ERROR("Can not parse packet at %d after CS end %d !\n", + idx, ib_chunk->length_dw); + return -EINVAL; + } + header = radeon_get_ib_value(p, idx); + pkt->idx = idx; + pkt->type = CP_PACKET_GET_TYPE(header); + pkt->count = CP_PACKET_GET_COUNT(header); + pkt->one_reg_wr = 0; + switch (pkt->type) { + case PACKET_TYPE0: + pkt->reg = CP_PACKET0_GET_REG(header); + break; + case PACKET_TYPE3: + pkt->opcode = CP_PACKET3_GET_OPCODE(header); + break; + case PACKET_TYPE2: + pkt->count = -1; + break; + default: + DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); + return -EINVAL; + } + if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { + DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", + pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); + return -EINVAL; + } + return 0; +} + +/** + * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3 + * @parser: parser structure holding parsing context. + * @cs_reloc: relocation information + * + * Check that the next packet is a relocation packet3, do bo validation and + * compute the GPU offset using the provided start. + **/ +static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, + struct radeon_cs_reloc **cs_reloc) +{ + struct radeon_cs_chunk *relocs_chunk; + struct radeon_cs_packet p3reloc; + unsigned idx; + int r; + + if (p->chunk_relocs_idx == -1) { + DRM_ERROR("No relocation chunk !\n"); + return -EINVAL; + } + *cs_reloc = NULL; + relocs_chunk = &p->chunks[p->chunk_relocs_idx]; + r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); + if (r) { + return r; + } + p->idx += p3reloc.count + 2; + if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { + DRM_ERROR("No packet3 for relocation for packet at %d.\n", + p3reloc.idx); + return -EINVAL; + } + idx = radeon_get_ib_value(p, p3reloc.idx + 1); + if (idx >= relocs_chunk->length_dw) { + DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", + idx, relocs_chunk->length_dw); + return -EINVAL; + } + /* FIXME: we assume reloc size is 4 dwords */ + *cs_reloc = p->relocs_ptr[(idx / 4)]; + return 0; +} + +/** + * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc + * @parser: parser structure holding parsing context. + * + * Check whether the next packet is a relocation packet3 NOP; returns 1 + * if it is, 0 otherwise. + **/ +static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) +{ + struct radeon_cs_packet p3reloc; + int r; + + r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); + if (r) { + return 0; + } + if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { + return 0; + } + return 1; +}
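evergreen_cs_packet_parse() above relies on the r600-family PM4 header layout: bits 31:30 carry the packet type, bits 29:16 the body dword count minus one, bits 15:0 of a type-0 header the register's dword index (byte address >> 2), and bits 15:8 of a type-3 header the opcode. The CP_PACKET_GET_* macros it uses amount to the following re-statement (illustrative, assuming that layout):

#define PKT_TYPE(h)	(((h) >> 30) & 0x3)	/* 0, 2 or 3 */
#define PKT_COUNT(h)	(((h) >> 16) & 0x3fff)	/* body dwords - 1 */
#define PKT0_REG(h)	(((h) & 0xffff) << 2)	/* back to a byte address */
#define PKT3_OPCODE(h)	(((h) >> 8) & 0xff)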
+ +/** + * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet + * @parser: parser structure holding parsing context. + * + * Userspace sends a special sequence for VLINE waits. + * PACKET0 - VLINE_START_END + value + * PACKET3 - WAIT_REG_MEM poll vline status reg + * RELOC (P3) - crtc_id in reloc. + * + * This function parses this and relocates the VLINE START END + * and WAIT_REG_MEM packets to the correct crtc. + * It also detects a switched-off crtc and nulls out the + * wait in that case. + */ +static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) +{ + struct drm_mode_object *obj; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + struct radeon_cs_packet p3reloc, wait_reg_mem; + int crtc_id; + int r; + uint32_t header, h_idx, reg, wait_reg_mem_info; + volatile uint32_t *ib; + + ib = p->ib->ptr; + + /* parse the WAIT_REG_MEM */ + r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx); + if (r) + return r; + + /* check it's a WAIT_REG_MEM */ + if (wait_reg_mem.type != PACKET_TYPE3 || + wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { + DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); + r = -EINVAL; + return r; + } + + wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); + /* bit 4 is reg (0) or mem (1) */ + if (wait_reg_mem_info & 0x10) { + DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); + r = -EINVAL; + return r; + } + /* waiting for value to be equal */ + if ((wait_reg_mem_info & 0x7) != 0x3) { + DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); + r = -EINVAL; + return r; + } + if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { + DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); + r = -EINVAL; + return r; + } + + if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { + DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); + r = -EINVAL; + return r; + } + + /* jump over the NOP */ + r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); + if (r) + return r; + + h_idx = p->idx - 2; + p->idx += wait_reg_mem.count + 2; + p->idx += p3reloc.count + 2; + + header = radeon_get_ib_value(p, h_idx); + crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); + reg = CP_PACKET0_GET_REG(header); + obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) { + DRM_ERROR("cannot find crtc %d\n", crtc_id); + r = -EINVAL; + goto out; + } + crtc = obj_to_crtc(obj); + radeon_crtc = to_radeon_crtc(crtc); + crtc_id = radeon_crtc->crtc_id; + + if (!crtc->enabled) { + /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ + ib[h_idx + 2] = PACKET2(0); + ib[h_idx + 3] = PACKET2(0); + ib[h_idx + 4] = PACKET2(0); + ib[h_idx + 5] = PACKET2(0); + ib[h_idx + 6] = PACKET2(0); + ib[h_idx + 7] = PACKET2(0); + ib[h_idx + 8] = PACKET2(0); + } else { + switch (reg) { + case EVERGREEN_VLINE_START_END: + header &= ~R600_CP_PACKET0_REG_MASK; + header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2; + ib[h_idx] = header; + ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2; + break; + default: + DRM_ERROR("unknown crtc reloc\n"); + r = -EINVAL; + goto out; + } + } +out: + return r; +} + +static int evergreen_packet0_check(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx, unsigned reg) +{ + int r; + + switch (reg) { + case EVERGREEN_VLINE_START_END: + r = evergreen_cs_packet_parse_vline(p); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + return r; + } + break; + default: + printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", + reg, idx); + return -EINVAL; + } + return 0; +} + +static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt) +{ + unsigned reg, i; + unsigned idx; + int r; + + idx = pkt->idx + 1; + reg = pkt->reg; + for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { + r = evergreen_packet0_check(p, pkt, idx, reg); + if (r) { + return r; + } + } + return 0; +}
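The evergreen_cs_check_reg() function that follows consults a whitelist bitmap with one bit per register dword: the word index is reg >> 7 (one 32-bit word covers 128 bytes of register space) and the bit index is (reg >> 2) & 31; a set bit means the register needs the special handling in the switch. That lookup in isolation (illustrative helper; safe_bm stands in for evergreen_reg_safe_bm):

static bool reg_needs_special_check(const u32 *safe_bm, u32 reg)
{
	u32 word = reg >> 7;		/* 32 registers x 4 bytes per bitmap word */
	u32 bit = (reg >> 2) & 31;	/* one bit per 4-byte register */

	return (safe_bm[word] & (1 << bit)) != 0;
}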
+ +/** + * evergreen_cs_check_reg() - check if register is authorized or not + * @parser: parser structure holding parsing context + * @reg: register we are testing + * @idx: index into the cs buffer + * + * This function will test against evergreen_reg_safe_bm and return 0 + * if the register is safe. If the register is not flagged as safe, this + * function will test it against a list of registers needing special + * handling. + */ +static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) +{ + struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; + struct radeon_cs_reloc *reloc; + u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); + u32 m, i, tmp, *ib; + int r; + + i = (reg >> 7); + if (i >= last_reg) { + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + return -EINVAL; + } + m = 1 << ((reg >> 2) & 31); + if (!(evergreen_reg_safe_bm[i] & m)) + return 0; + ib = p->ib->ptr; + switch (reg) { + /* force the following regs to 0 in an attempt to disable the output + * buffer; we will need to better understand how it works to perform + * security checks on it (Jerome) + */ + case SQ_ESGS_RING_SIZE: + case SQ_GSVS_RING_SIZE: + case SQ_ESTMP_RING_SIZE: + case SQ_GSTMP_RING_SIZE: + case SQ_HSTMP_RING_SIZE: + case SQ_LSTMP_RING_SIZE: + case SQ_PSTMP_RING_SIZE: + case SQ_VSTMP_RING_SIZE: + case SQ_ESGS_RING_ITEMSIZE: + case SQ_ESTMP_RING_ITEMSIZE: + case SQ_GSTMP_RING_ITEMSIZE: + case SQ_GSVS_RING_ITEMSIZE: + case SQ_GS_VERT_ITEMSIZE: + case SQ_GS_VERT_ITEMSIZE_1: + case SQ_GS_VERT_ITEMSIZE_2: + case SQ_GS_VERT_ITEMSIZE_3: + case SQ_GSVS_RING_OFFSET_1: + case SQ_GSVS_RING_OFFSET_2: + case SQ_GSVS_RING_OFFSET_3: + case SQ_HSTMP_RING_ITEMSIZE: + case SQ_LSTMP_RING_ITEMSIZE: + case SQ_PSTMP_RING_ITEMSIZE: + case SQ_VSTMP_RING_ITEMSIZE: + case VGT_TF_RING_SIZE: + /* get value to populate the IB, don't remove */ + tmp = radeon_get_ib_value(p, idx); + ib[idx] = 0; + break; + case DB_DEPTH_CONTROL: + track->db_depth_control = radeon_get_ib_value(p, idx); + break; + case DB_Z_INFO: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_z_info = radeon_get_ib_value(p, idx); + ib[idx] &= ~Z_ARRAY_MODE(0xf); + track->db_z_info &= ~Z_ARRAY_MODE(0xf); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + } else { + ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + } + break; + case DB_STENCIL_INFO: + track->db_s_info = radeon_get_ib_value(p, idx); + break; + case DB_DEPTH_VIEW: + track->db_depth_view = radeon_get_ib_value(p, idx); + break; + case DB_DEPTH_SIZE: + track->db_depth_size = radeon_get_ib_value(p, idx); + track->db_depth_size_idx = idx; + break; + case DB_Z_READ_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_z_read_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_z_read_bo = reloc->robj; + break; + case DB_Z_WRITE_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_z_write_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); +
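The ">> 8" in the fixup just above encodes a hardware convention: these BASE-style registers hold 256-byte-aligned addresses, so the relocated GPU offset is written in 256-byte units. As a standalone sketch (illustrative helper only):

static u32 base_reg_value(u64 gpu_offset)
{
	/* BASE-style registers take the address in 256-byte units */
	return (u32)((gpu_offset >> 8) & 0xffffffff);
}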
track->db_z_write_bo = reloc->robj; + break; + case DB_STENCIL_READ_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_s_read_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_s_read_bo = reloc->robj; + break; + case DB_STENCIL_WRITE_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_s_write_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_s_write_bo = reloc->robj; + break; + case VGT_STRMOUT_CONFIG: + track->vgt_strmout_config = radeon_get_ib_value(p, idx); + break; + case VGT_STRMOUT_BUFFER_CONFIG: + track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); + break; + case CB_TARGET_MASK: + track->cb_target_mask = radeon_get_ib_value(p, idx); + break; + case CB_SHADER_MASK: + track->cb_shader_mask = radeon_get_ib_value(p, idx); + break; + case PA_SC_AA_CONFIG: + tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; + track->nsamples = 1 << tmp; + break; + case CB_COLOR0_VIEW: + case CB_COLOR1_VIEW: + case CB_COLOR2_VIEW: + case CB_COLOR3_VIEW: + case CB_COLOR4_VIEW: + case CB_COLOR5_VIEW: + case CB_COLOR6_VIEW: + case CB_COLOR7_VIEW: + tmp = (reg - CB_COLOR0_VIEW) / 0x3c; + track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); + break; + case CB_COLOR8_VIEW: + case CB_COLOR9_VIEW: + case CB_COLOR10_VIEW: + case CB_COLOR11_VIEW: + tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; + track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); + break; + case CB_COLOR0_INFO: + case CB_COLOR1_INFO: + case CB_COLOR2_INFO: + case CB_COLOR3_INFO: + case CB_COLOR4_INFO: + case CB_COLOR5_INFO: + case CB_COLOR6_INFO: + case CB_COLOR7_INFO: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = (reg - CB_COLOR0_INFO) / 0x3c; + track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + } + break; + case CB_COLOR8_INFO: + case CB_COLOR9_INFO: + case CB_COLOR10_INFO: + case CB_COLOR11_INFO: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; + track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + } + break; + case CB_COLOR0_PITCH: + case CB_COLOR1_PITCH: + case CB_COLOR2_PITCH: + case CB_COLOR3_PITCH: + case CB_COLOR4_PITCH: + case CB_COLOR5_PITCH: + case CB_COLOR6_PITCH: + case CB_COLOR7_PITCH: + tmp = (reg - CB_COLOR0_PITCH) / 0x3c; + track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); + 
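The index arithmetic in the CB_COLOR* cases around here mirrors the register layout: render targets 0-7 are spaced 0x3c bytes apart starting at the CB_COLOR0_* block, while targets 8-11 use a more compact 0x1c stride. A standalone sketch of the mapping (illustrative helper; it assumes the second bank sits above the first in the address map):

static int cb_target_index(u32 reg, u32 base0, u32 base8)
{
	/* reg is assumed to lie in one of the two CB register banks */
	if (reg < base8)
		return (reg - base0) / 0x3c;	/* targets 0-7 */
	return ((reg - base8) / 0x1c) + 8;	/* targets 8-11 */
}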
track->cb_color_pitch_idx[tmp] = idx; + break; + case CB_COLOR8_PITCH: + case CB_COLOR9_PITCH: + case CB_COLOR10_PITCH: + case CB_COLOR11_PITCH: + tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; + track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_pitch_idx[tmp] = idx; + break; + case CB_COLOR0_SLICE: + case CB_COLOR1_SLICE: + case CB_COLOR2_SLICE: + case CB_COLOR3_SLICE: + case CB_COLOR4_SLICE: + case CB_COLOR5_SLICE: + case CB_COLOR6_SLICE: + case CB_COLOR7_SLICE: + tmp = (reg - CB_COLOR0_SLICE) / 0x3c; + track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_slice_idx[tmp] = idx; + break; + case CB_COLOR8_SLICE: + case CB_COLOR9_SLICE: + case CB_COLOR10_SLICE: + case CB_COLOR11_SLICE: + tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; + track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_slice_idx[tmp] = idx; + break; + case CB_COLOR0_ATTRIB: + case CB_COLOR1_ATTRIB: + case CB_COLOR2_ATTRIB: + case CB_COLOR3_ATTRIB: + case CB_COLOR4_ATTRIB: + case CB_COLOR5_ATTRIB: + case CB_COLOR6_ATTRIB: + case CB_COLOR7_ATTRIB: + case CB_COLOR8_ATTRIB: + case CB_COLOR9_ATTRIB: + case CB_COLOR10_ATTRIB: + case CB_COLOR11_ATTRIB: + break; + case CB_COLOR0_DIM: + case CB_COLOR1_DIM: + case CB_COLOR2_DIM: + case CB_COLOR3_DIM: + case CB_COLOR4_DIM: + case CB_COLOR5_DIM: + case CB_COLOR6_DIM: + case CB_COLOR7_DIM: + tmp = (reg - CB_COLOR0_DIM) / 0x3c; + track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_dim_idx[tmp] = idx; + break; + case CB_COLOR8_DIM: + case CB_COLOR9_DIM: + case CB_COLOR10_DIM: + case CB_COLOR11_DIM: + tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8; + track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_dim_idx[tmp] = idx; + break; + case CB_COLOR0_FMASK: + case CB_COLOR1_FMASK: + case CB_COLOR2_FMASK: + case CB_COLOR3_FMASK: + case CB_COLOR4_FMASK: + case CB_COLOR5_FMASK: + case CB_COLOR6_FMASK: + case CB_COLOR7_FMASK: + tmp = (reg - CB_COLOR0_FMASK) / 0x3c; + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_fmask_bo[tmp] = reloc->robj; + break; + case CB_COLOR0_CMASK: + case CB_COLOR1_CMASK: + case CB_COLOR2_CMASK: + case CB_COLOR3_CMASK: + case CB_COLOR4_CMASK: + case CB_COLOR5_CMASK: + case CB_COLOR6_CMASK: + case CB_COLOR7_CMASK: + tmp = (reg - CB_COLOR0_CMASK) / 0x3c; + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_cmask_bo[tmp] = reloc->robj; + break; + case CB_COLOR0_FMASK_SLICE: + case CB_COLOR1_FMASK_SLICE: + case CB_COLOR2_FMASK_SLICE: + case CB_COLOR3_FMASK_SLICE: + case CB_COLOR4_FMASK_SLICE: + case CB_COLOR5_FMASK_SLICE: + case CB_COLOR6_FMASK_SLICE: + case CB_COLOR7_FMASK_SLICE: + tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c; + track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx); + break; + case CB_COLOR0_CMASK_SLICE: + case CB_COLOR1_CMASK_SLICE: + case CB_COLOR2_CMASK_SLICE: + case CB_COLOR3_CMASK_SLICE: + case CB_COLOR4_CMASK_SLICE: + case CB_COLOR5_CMASK_SLICE: + case CB_COLOR6_CMASK_SLICE: + case CB_COLOR7_CMASK_SLICE: + tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c; + track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx); + break; + case CB_COLOR0_BASE: + case CB_COLOR1_BASE: + case CB_COLOR2_BASE: + case 
CB_COLOR3_BASE: + case CB_COLOR4_BASE: + case CB_COLOR5_BASE: + case CB_COLOR6_BASE: + case CB_COLOR7_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = (reg - CB_COLOR0_BASE) / 0x3c; + track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_base_last[tmp] = ib[idx]; + track->cb_color_bo[tmp] = reloc->robj; + break; + case CB_COLOR8_BASE: + case CB_COLOR9_BASE: + case CB_COLOR10_BASE: + case CB_COLOR11_BASE: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; + track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_base_last[tmp] = ib[idx]; + track->cb_color_bo[tmp] = reloc->robj; + break; + case CB_IMMED0_BASE: + case CB_IMMED1_BASE: + case CB_IMMED2_BASE: + case CB_IMMED3_BASE: + case CB_IMMED4_BASE: + case CB_IMMED5_BASE: + case CB_IMMED6_BASE: + case CB_IMMED7_BASE: + case CB_IMMED8_BASE: + case CB_IMMED9_BASE: + case CB_IMMED10_BASE: + case CB_IMMED11_BASE: + case DB_HTILE_DATA_BASE: + case SQ_PGM_START_FS: + case SQ_PGM_START_ES: + case SQ_PGM_START_VS: + case SQ_PGM_START_GS: + case SQ_PGM_START_PS: + case SQ_PGM_START_HS: + case SQ_PGM_START_LS: + case GDS_ADDR_BASE: + case SQ_CONST_MEM_BASE: + case SQ_ALU_CONST_CACHE_GS_0: + case SQ_ALU_CONST_CACHE_GS_1: + case SQ_ALU_CONST_CACHE_GS_2: + case SQ_ALU_CONST_CACHE_GS_3: + case SQ_ALU_CONST_CACHE_GS_4: + case SQ_ALU_CONST_CACHE_GS_5: + case SQ_ALU_CONST_CACHE_GS_6: + case SQ_ALU_CONST_CACHE_GS_7: + case SQ_ALU_CONST_CACHE_GS_8: + case SQ_ALU_CONST_CACHE_GS_9: + case SQ_ALU_CONST_CACHE_GS_10: + case SQ_ALU_CONST_CACHE_GS_11: + case SQ_ALU_CONST_CACHE_GS_12: + case SQ_ALU_CONST_CACHE_GS_13: + case SQ_ALU_CONST_CACHE_GS_14: + case SQ_ALU_CONST_CACHE_GS_15: + case SQ_ALU_CONST_CACHE_PS_0: + case SQ_ALU_CONST_CACHE_PS_1: + case SQ_ALU_CONST_CACHE_PS_2: + case SQ_ALU_CONST_CACHE_PS_3: + case SQ_ALU_CONST_CACHE_PS_4: + case SQ_ALU_CONST_CACHE_PS_5: + case SQ_ALU_CONST_CACHE_PS_6: + case SQ_ALU_CONST_CACHE_PS_7: + case SQ_ALU_CONST_CACHE_PS_8: + case SQ_ALU_CONST_CACHE_PS_9: + case SQ_ALU_CONST_CACHE_PS_10: + case SQ_ALU_CONST_CACHE_PS_11: + case SQ_ALU_CONST_CACHE_PS_12: + case SQ_ALU_CONST_CACHE_PS_13: + case SQ_ALU_CONST_CACHE_PS_14: + case SQ_ALU_CONST_CACHE_PS_15: + case SQ_ALU_CONST_CACHE_VS_0: + case SQ_ALU_CONST_CACHE_VS_1: + case SQ_ALU_CONST_CACHE_VS_2: + case SQ_ALU_CONST_CACHE_VS_3: + case SQ_ALU_CONST_CACHE_VS_4: + case SQ_ALU_CONST_CACHE_VS_5: + case SQ_ALU_CONST_CACHE_VS_6: + case SQ_ALU_CONST_CACHE_VS_7: + case SQ_ALU_CONST_CACHE_VS_8: + case SQ_ALU_CONST_CACHE_VS_9: + case SQ_ALU_CONST_CACHE_VS_10: + case SQ_ALU_CONST_CACHE_VS_11: + case SQ_ALU_CONST_CACHE_VS_12: + case SQ_ALU_CONST_CACHE_VS_13: + case SQ_ALU_CONST_CACHE_VS_14: + case SQ_ALU_CONST_CACHE_VS_15: + case SQ_ALU_CONST_CACHE_HS_0: + case SQ_ALU_CONST_CACHE_HS_1: + case SQ_ALU_CONST_CACHE_HS_2: + case SQ_ALU_CONST_CACHE_HS_3: + case SQ_ALU_CONST_CACHE_HS_4: + case SQ_ALU_CONST_CACHE_HS_5: + case SQ_ALU_CONST_CACHE_HS_6: + case SQ_ALU_CONST_CACHE_HS_7: + case SQ_ALU_CONST_CACHE_HS_8: + case SQ_ALU_CONST_CACHE_HS_9: + case SQ_ALU_CONST_CACHE_HS_10: + case SQ_ALU_CONST_CACHE_HS_11: + case SQ_ALU_CONST_CACHE_HS_12: + case SQ_ALU_CONST_CACHE_HS_13: + case 
SQ_ALU_CONST_CACHE_HS_14: + case SQ_ALU_CONST_CACHE_HS_15: + case SQ_ALU_CONST_CACHE_LS_0: + case SQ_ALU_CONST_CACHE_LS_1: + case SQ_ALU_CONST_CACHE_LS_2: + case SQ_ALU_CONST_CACHE_LS_3: + case SQ_ALU_CONST_CACHE_LS_4: + case SQ_ALU_CONST_CACHE_LS_5: + case SQ_ALU_CONST_CACHE_LS_6: + case SQ_ALU_CONST_CACHE_LS_7: + case SQ_ALU_CONST_CACHE_LS_8: + case SQ_ALU_CONST_CACHE_LS_9: + case SQ_ALU_CONST_CACHE_LS_10: + case SQ_ALU_CONST_CACHE_LS_11: + case SQ_ALU_CONST_CACHE_LS_12: + case SQ_ALU_CONST_CACHE_LS_13: + case SQ_ALU_CONST_CACHE_LS_14: + case SQ_ALU_CONST_CACHE_LS_15: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + break; + default: + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + return -EINVAL; + } + return 0; +} + +/** + * evergreen_check_texture_resource() - check if a texture resource is valid + * @p: parser structure holding parsing context + * @idx: index into the cs buffer + * @texture: texture's bo structure + * @mipmap: mipmap's bo structure + * + * This function will check that the resource has valid fields and that + * the texture and mipmap bo objects are big enough to cover this resource. + */ +static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, + struct radeon_bo *texture, + struct radeon_bo *mipmap) +{ + /* XXX fill in */ + return 0; +} + +static int evergreen_packet3_check(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt) +{ + struct radeon_cs_reloc *reloc; + struct evergreen_cs_track *track; + volatile u32 *ib; + unsigned idx; + unsigned i; + unsigned start_reg, end_reg, reg; + int r; + u32 idx_value; + + track = (struct evergreen_cs_track *)p->track; + ib = p->ib->ptr; + idx = pkt->idx + 1; + idx_value = radeon_get_ib_value(p, idx); + + switch (pkt->opcode) { + case PACKET3_CONTEXT_CONTROL: + if (pkt->count != 1) { + DRM_ERROR("bad CONTEXT_CONTROL\n"); + return -EINVAL; + } + break; + case PACKET3_INDEX_TYPE: + case PACKET3_NUM_INSTANCES: + case PACKET3_CLEAR_STATE: + if (pkt->count) { + DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); + return -EINVAL; + } + break; + case PACKET3_INDEX_BASE: + if (pkt->count != 1) { + DRM_ERROR("bad INDEX_BASE\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad INDEX_BASE\n"); + return -EINVAL; + } + ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX: + if (pkt->count != 3) { + DRM_ERROR("bad DRAW_INDEX\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad DRAW_INDEX\n"); + return -EINVAL; + } + ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_2: + if (pkt->count != 4) { + DRM_ERROR("bad DRAW_INDEX_2\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad DRAW_INDEX_2\n"); + return -EINVAL; + } + ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); +
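The paired fixups here follow the parser's convention for full 64-bit GPU addresses: the low 32 bits of the relocation offset are added into one IB dword and bits 39:32 into the next, matching a 40-bit GPU address space. In isolation (illustrative helper; upper_32_bits() is the kernel macro already used above):

static void ib_patch_addr64(volatile u32 *ib, unsigned int idx, u64 gpu_offset)
{
	ib[idx] += (u32)(gpu_offset & 0xffffffff);	 /* low 32 bits */
	ib[idx + 1] += upper_32_bits(gpu_offset) & 0xff; /* bits 39:32 */
}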
ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_AUTO: + if (pkt->count != 1) { + DRM_ERROR("bad DRAW_INDEX_AUTO\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + return r; + } + break; + case PACKET3_DRAW_INDEX_MULTI_AUTO: + if (pkt->count != 2) { + DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + return r; + } + break; + case PACKET3_DRAW_INDEX_IMMD: + if (pkt->count < 2) { + DRM_ERROR("bad DRAW_INDEX_IMMD\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_OFFSET: + if (pkt->count != 2) { + DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_DRAW_INDEX_OFFSET_2: + if (pkt->count != 3) { + DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); + return -EINVAL; + } + r = evergreen_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } + break; + case PACKET3_WAIT_REG_MEM: + if (pkt->count != 5) { + DRM_ERROR("bad WAIT_REG_MEM\n"); + return -EINVAL; + } + /* bit 4 is reg (0) or mem (1) */ + if (idx_value & 0x10) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad WAIT_REG_MEM\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + } + break; + case PACKET3_SURFACE_SYNC: + if (pkt->count != 3) { + DRM_ERROR("bad SURFACE_SYNC\n"); + return -EINVAL; + } + /* 0xffffffff/0x0 is flush all cache flag */ + if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || + radeon_get_ib_value(p, idx + 2) != 0) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SURFACE_SYNC\n"); + return -EINVAL; + } + ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + } + break; + case PACKET3_EVENT_WRITE: + if (pkt->count != 2 && pkt->count != 0) { + DRM_ERROR("bad EVENT_WRITE\n"); + return -EINVAL; + } + if (pkt->count) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad EVENT_WRITE\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + } + break; + case PACKET3_EVENT_WRITE_EOP: + if (pkt->count != 4) { + DRM_ERROR("bad EVENT_WRITE_EOP\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad EVENT_WRITE_EOP\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + break; + case PACKET3_EVENT_WRITE_EOS: + if (pkt->count != 3) { + DRM_ERROR("bad EVENT_WRITE_EOS\n"); + return -EINVAL; + } + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad EVENT_WRITE_EOS\n"); + return -EINVAL; + } + ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + break; + case 
PACKET3_SET_CONFIG_REG: + start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_CONFIG_REG_START) || + (start_reg >= PACKET3_SET_CONFIG_REG_END) || + (end_reg >= PACKET3_SET_CONFIG_REG_END)) { + DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); + return -EINVAL; + } + for (i = 0; i < pkt->count; i++) { + reg = start_reg + (4 * i); + r = evergreen_cs_check_reg(p, reg, idx+1+i); + if (r) + return r; + } + break; + case PACKET3_SET_CONTEXT_REG: + start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || + (start_reg >= PACKET3_SET_CONTEXT_REG_END) || + (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { + DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); + return -EINVAL; + } + for (i = 0; i < pkt->count; i++) { + reg = start_reg + (4 * i); + r = evergreen_cs_check_reg(p, reg, idx+1+i); + if (r) + return r; + } + break; + case PACKET3_SET_RESOURCE: + if (pkt->count % 8) { + DRM_ERROR("bad SET_RESOURCE\n"); + return -EINVAL; + } + start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_RESOURCE_START) || + (start_reg >= PACKET3_SET_RESOURCE_END) || + (end_reg >= PACKET3_SET_RESOURCE_END)) { + DRM_ERROR("bad SET_RESOURCE\n"); + return -EINVAL; + } + for (i = 0; i < (pkt->count / 8); i++) { + struct radeon_bo *texture, *mipmap; + u32 size, offset; + + switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { + case SQ_TEX_VTX_VALID_TEXTURE: + /* tex base */ + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_RESOURCE (tex)\n"); + return -EINVAL; + } + ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + texture = reloc->robj; + /* tex mip base */ + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_RESOURCE (tex)\n"); + return -EINVAL; + } + ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + mipmap = reloc->robj; + r = evergreen_check_texture_resource(p, idx+1+(i*8), + texture, mipmap); + if (r) + return r; + break; + case SQ_TEX_VTX_VALID_BUFFER: + /* vtx base */ + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_RESOURCE (vtx)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx+1+(i*8)+0); + size = radeon_get_ib_value(p, idx+1+(i*8)+1); + if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { + /* force size to size of the buffer */ + dev_warn(p->dev, "vbo resource seems too big for the bo\n"); + ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj); + } + ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); + ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + break; + case SQ_TEX_VTX_INVALID_TEXTURE: + case SQ_TEX_VTX_INVALID_BUFFER: + default: + DRM_ERROR("bad SET_RESOURCE\n"); + return -EINVAL; + } + } + break; + case PACKET3_SET_ALU_CONST: + /* XXX fix me ALU const buffers only */ + break; + case PACKET3_SET_BOOL_CONST: + start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_BOOL_CONST_START) || + (start_reg >= PACKET3_SET_BOOL_CONST_END) || + (end_reg >= 
PACKET3_SET_BOOL_CONST_END)) { + DRM_ERROR("bad SET_BOOL_CONST\n"); + return -EINVAL; + } + break; + case PACKET3_SET_LOOP_CONST: + start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_LOOP_CONST_START) || + (start_reg >= PACKET3_SET_LOOP_CONST_END) || + (end_reg >= PACKET3_SET_LOOP_CONST_END)) { + DRM_ERROR("bad SET_LOOP_CONST\n"); + return -EINVAL; + } + break; + case PACKET3_SET_CTL_CONST: + start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_CTL_CONST_START) || + (start_reg >= PACKET3_SET_CTL_CONST_END) || + (end_reg >= PACKET3_SET_CTL_CONST_END)) { + DRM_ERROR("bad SET_CTL_CONST\n"); + return -EINVAL; + } + break; + case PACKET3_SET_SAMPLER: + if (pkt->count % 3) { + DRM_ERROR("bad SET_SAMPLER\n"); + return -EINVAL; + } + start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_SAMPLER_START) || + (start_reg >= PACKET3_SET_SAMPLER_END) || + (end_reg >= PACKET3_SET_SAMPLER_END)) { + DRM_ERROR("bad SET_SAMPLER\n"); + return -EINVAL; + } + break; + case PACKET3_NOP: + break; + default: + DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); + return -EINVAL; + } + return 0; +} + +int evergreen_cs_parse(struct radeon_cs_parser *p) +{ + struct radeon_cs_packet pkt; + struct evergreen_cs_track *track; + int r; + + if (p->track == NULL) { + /* initialize tracker, we are in kms */ + track = kzalloc(sizeof(*track), GFP_KERNEL); + if (track == NULL) + return -ENOMEM; + evergreen_cs_track_init(track); + track->npipes = p->rdev->config.evergreen.tiling_npipes; + track->nbanks = p->rdev->config.evergreen.tiling_nbanks; + track->group_size = p->rdev->config.evergreen.tiling_group_size; + p->track = track; + } + do { + r = evergreen_cs_packet_parse(p, &pkt, p->idx); + if (r) { + kfree(p->track); + p->track = NULL; + return r; + } + p->idx += pkt.count + 2; + switch (pkt.type) { + case PACKET_TYPE0: + r = evergreen_cs_parse_packet0(p, &pkt); + break; + case PACKET_TYPE2: + break; + case PACKET_TYPE3: + r = evergreen_packet3_check(p, &pkt); + break; + default: + DRM_ERROR("Unknown packet type %d !\n", pkt.type); + kfree(p->track); + p->track = NULL; + return -EINVAL; + } + if (r) { + kfree(p->track); + p->track = NULL; + return r; + } + } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); +#if 0 + for (r = 0; r < p->ib->length_dw; r++) { + printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); + mdelay(1); + } +#endif + kfree(p->track); + p->track = NULL; + return 0; +} + diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index f7c7c9643433..e028c1cd9d9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -151,6 +151,9 @@ #define EVERGREEN_DATA_FORMAT 0x6b00 # define EVERGREEN_INTERLEAVE_EN (1 << 0) #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 +#define EVERGREEN_VLINE_START_END 0x6b08 +#define EVERGREEN_VLINE_STATUS 0x6bb8 +# define EVERGREEN_VLINE_STAT (1 << 12) #define EVERGREEN_VIEWPORT_START 0x6d70 #define EVERGREEN_VIEWPORT_SIZE 0x6d74 @@ -164,8 +167,12 @@ #define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ +#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34 #define EVERGREEN_CRTC_CONTROL 0x6e70 # define EVERGREEN_CRTC_MASTER_EN (1 << 0) +# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 
<< 24) +#define EVERGREEN_CRTC_STATUS 0x6e8c +#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h new file mode 100644 index 000000000000..a1cd621780e2 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -0,0 +1,1023 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#ifndef EVERGREEND_H +#define EVERGREEND_H + +#define EVERGREEN_MAX_SH_GPRS 256 +#define EVERGREEN_MAX_TEMP_GPRS 16 +#define EVERGREEN_MAX_SH_THREADS 256 +#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096 +#define EVERGREEN_MAX_FRC_EOV_CNT 16384 +#define EVERGREEN_MAX_BACKENDS 8 +#define EVERGREEN_MAX_BACKENDS_MASK 0xFF +#define EVERGREEN_MAX_SIMDS 16 +#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF +#define EVERGREEN_MAX_PIPES 8 +#define EVERGREEN_MAX_PIPES_MASK 0xFF +#define EVERGREEN_MAX_LDS_NUM 0xFFFF + +/* Registers */ + +#define RCU_IND_INDEX 0x100 +#define RCU_IND_DATA 0x104 + +#define GRBM_GFX_INDEX 0x802C +#define INSTANCE_INDEX(x) ((x) << 0) +#define SE_INDEX(x) ((x) << 16) +#define INSTANCE_BROADCAST_WRITES (1 << 30) +#define SE_BROADCAST_WRITES (1 << 31) +#define RLC_GFX_INDEX 0x3fC4 +#define CC_GC_SHADER_PIPE_CONFIG 0x8950 +#define WRITE_DIS (1 << 0) +#define CC_RB_BACKEND_DISABLE 0x98F4 +#define BACKEND_DISABLE(x) ((x) << 16) +#define GB_ADDR_CONFIG 0x98F8 +#define NUM_PIPES(x) ((x) << 0) +#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) +#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) +#define NUM_SHADER_ENGINES(x) ((x) << 12) +#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) +#define NUM_GPUS(x) ((x) << 20) +#define MULTI_GPU_TILE_SIZE(x) ((x) << 24) +#define ROW_SIZE(x) ((x) << 28) +#define GB_BACKEND_MAP 0x98FC +#define DMIF_ADDR_CONFIG 0xBD4 +#define HDP_ADDR_CONFIG 0x2F48 + +#define CC_SYS_RB_BACKEND_DISABLE 0x3F88 +#define GC_USER_RB_BACKEND_DISABLE 0x9B7C + +#define CGTS_SYS_TCC_DISABLE 0x3F90 +#define CGTS_TCC_DISABLE 0x9148 +#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 +#define CGTS_USER_TCC_DISABLE 0x914C + +#define CONFIG_MEMSIZE 0x5428 + +#define CP_ME_CNTL 0x86D8 +#define CP_ME_HALT (1 << 28) +#define CP_PFP_HALT (1 << 26) +#define CP_ME_RAM_DATA 0xC160 +#define CP_ME_RAM_RADDR 0xC158 +#define CP_ME_RAM_WADDR 0xC15C +#define CP_MEQ_THRESHOLDS 0x8764 +#define STQ_SPLIT(x) ((x) << 0) +#define CP_PERFMON_CNTL 0x87FC +#define CP_PFP_UCODE_ADDR 0xC150 
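The parameterized macros in this header follow the usual shift-into-field pattern, so a register value is composed by OR-ing fields together; the GB_ADDR_CONFIG fields defined above are a typical case. The field values below are illustrative only, not taken from this patch:

	u32 gb_addr_config = NUM_PIPES(2) |
			     PIPE_INTERLEAVE_SIZE(1) |
			     BANK_INTERLEAVE_SIZE(0) |
			     NUM_SHADER_ENGINES(0) |
			     ROW_SIZE(1);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);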
+#define CP_PFP_UCODE_DATA 0xC154 +#define CP_QUEUE_THRESHOLDS 0x8760 +#define ROQ_IB1_START(x) ((x) << 0) +#define ROQ_IB2_START(x) ((x) << 8) +#define CP_RB_BASE 0xC100 +#define CP_RB_CNTL 0xC104 +#define RB_BUFSZ(x) ((x) << 0) +#define RB_BLKSZ(x) ((x) << 8) +#define RB_NO_UPDATE (1 << 27) +#define RB_RPTR_WR_ENA (1 << 31) +#define BUF_SWAP_32BIT (2 << 16) +#define CP_RB_RPTR 0x8700 +#define CP_RB_RPTR_ADDR 0xC10C +#define CP_RB_RPTR_ADDR_HI 0xC110 +#define CP_RB_RPTR_WR 0xC108 +#define CP_RB_WPTR 0xC114 +#define CP_RB_WPTR_ADDR 0xC118 +#define CP_RB_WPTR_ADDR_HI 0xC11C +#define CP_RB_WPTR_DELAY 0x8704 +#define CP_SEM_WAIT_TIMER 0x85BC +#define CP_DEBUG 0xC1FC + + +#define GC_USER_SHADER_PIPE_CONFIG 0x8954 +#define INACTIVE_QD_PIPES(x) ((x) << 8) +#define INACTIVE_QD_PIPES_MASK 0x0000FF00 +#define INACTIVE_SIMDS(x) ((x) << 16) +#define INACTIVE_SIMDS_MASK 0x00FF0000 + +#define GRBM_CNTL 0x8000 +#define GRBM_READ_TIMEOUT(x) ((x) << 0) +#define GRBM_SOFT_RESET 0x8020 +#define SOFT_RESET_CP (1 << 0) +#define SOFT_RESET_CB (1 << 1) +#define SOFT_RESET_DB (1 << 3) +#define SOFT_RESET_PA (1 << 5) +#define SOFT_RESET_SC (1 << 6) +#define SOFT_RESET_SPI (1 << 8) +#define SOFT_RESET_SH (1 << 9) +#define SOFT_RESET_SX (1 << 10) +#define SOFT_RESET_TC (1 << 11) +#define SOFT_RESET_TA (1 << 12) +#define SOFT_RESET_VC (1 << 13) +#define SOFT_RESET_VGT (1 << 14) + +#define GRBM_STATUS 0x8010 +#define CMDFIFO_AVAIL_MASK 0x0000000F +#define SRBM_RQ_PENDING (1 << 5) +#define CF_RQ_PENDING (1 << 7) +#define PF_RQ_PENDING (1 << 8) +#define GRBM_EE_BUSY (1 << 10) +#define SX_CLEAN (1 << 11) +#define DB_CLEAN (1 << 12) +#define CB_CLEAN (1 << 13) +#define TA_BUSY (1 << 14) +#define VGT_BUSY_NO_DMA (1 << 16) +#define VGT_BUSY (1 << 17) +#define SX_BUSY (1 << 20) +#define SH_BUSY (1 << 21) +#define SPI_BUSY (1 << 22) +#define SC_BUSY (1 << 24) +#define PA_BUSY (1 << 25) +#define DB_BUSY (1 << 26) +#define CP_COHERENCY_BUSY (1 << 28) +#define CP_BUSY (1 << 29) +#define CB_BUSY (1 << 30) +#define GUI_ACTIVE (1 << 31) +#define GRBM_STATUS_SE0 0x8014 +#define GRBM_STATUS_SE1 0x8018 +#define SE_SX_CLEAN (1 << 0) +#define SE_DB_CLEAN (1 << 1) +#define SE_CB_CLEAN (1 << 2) +#define SE_TA_BUSY (1 << 25) +#define SE_SX_BUSY (1 << 26) +#define SE_SPI_BUSY (1 << 27) +#define SE_SH_BUSY (1 << 28) +#define SE_SC_BUSY (1 << 29) +#define SE_DB_BUSY (1 << 30) +#define SE_CB_BUSY (1 << 31) + +#define HDP_HOST_PATH_CNTL 0x2C00 +#define HDP_NONSURFACE_BASE 0x2C04 +#define HDP_NONSURFACE_INFO 0x2C08 +#define HDP_NONSURFACE_SIZE 0x2C0C +#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 +#define HDP_TILING_CONFIG 0x2F3C + +#define MC_SHARED_CHMAP 0x2004 +#define NOOFCHAN_SHIFT 12 +#define NOOFCHAN_MASK 0x00003000 + +#define MC_ARB_RAMCFG 0x2760 +#define NOOFBANK_SHIFT 0 +#define NOOFBANK_MASK 0x00000003 +#define NOOFRANK_SHIFT 2 +#define NOOFRANK_MASK 0x00000004 +#define NOOFROWS_SHIFT 3 +#define NOOFROWS_MASK 0x00000038 +#define NOOFCOLS_SHIFT 6 +#define NOOFCOLS_MASK 0x000000C0 +#define CHANSIZE_SHIFT 8 +#define CHANSIZE_MASK 0x00000100 +#define BURSTLENGTH_SHIFT 9 +#define BURSTLENGTH_MASK 0x00000200 +#define CHANSIZE_OVERRIDE (1 << 11) +#define MC_VM_AGP_TOP 0x2028 +#define MC_VM_AGP_BOT 0x202C +#define MC_VM_AGP_BASE 0x2030 +#define MC_VM_FB_LOCATION 0x2024 +#define MC_VM_MB_L1_TLB0_CNTL 0x2234 +#define MC_VM_MB_L1_TLB1_CNTL 0x2238 +#define MC_VM_MB_L1_TLB2_CNTL 0x223C +#define MC_VM_MB_L1_TLB3_CNTL 0x2240 +#define ENABLE_L1_TLB (1 << 0) +#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) +#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) 
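The SOFT_RESET_* bits under GRBM_SOFT_RESET above are what evergreen_asic_reset() pulses via evergreen_gpu_soft_reset(); a schematic of such a pulse follows (the exact bit set and delay are assumptions here, the real sequence lives in evergreen_gpu_soft_reset()):

	u32 grbm_reset = SOFT_RESET_CP | SOFT_RESET_CB | SOFT_RESET_DB |
			 SOFT_RESET_PA | SOFT_RESET_SC | SOFT_RESET_SPI |
			 SOFT_RESET_SH | SOFT_RESET_SX | SOFT_RESET_TC |
			 SOFT_RESET_TA | SOFT_RESET_VC | SOFT_RESET_VGT;

	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);	/* read back to post the write */
	udelay(50);			/* assumed settle time */
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);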
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) +#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) +#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) +#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) +#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15) +#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18) +#define MC_VM_MD_L1_TLB0_CNTL 0x2654 +#define MC_VM_MD_L1_TLB1_CNTL 0x2658 +#define MC_VM_MD_L1_TLB2_CNTL 0x265C +#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C +#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 +#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 + +#define PA_CL_ENHANCE 0x8A14 +#define CLIP_VTX_REORDER_ENA (1 << 0) +#define NUM_CLIP_SEQ(x) ((x) << 1) +#define PA_SC_AA_CONFIG 0x28C04 +#define MSAA_NUM_SAMPLES_SHIFT 0 +#define MSAA_NUM_SAMPLES_MASK 0x3 +#define PA_SC_CLIPRECT_RULE 0x2820C +#define PA_SC_EDGERULE 0x28230 +#define PA_SC_FIFO_SIZE 0x8BCC +#define SC_PRIM_FIFO_SIZE(x) ((x) << 0) +#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) +#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) +#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 +#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) +#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) +#define PA_SC_LINE_STIPPLE 0x28A0C +#define PA_SC_LINE_STIPPLE_STATE 0x8B10 + +#define SCRATCH_REG0 0x8500 +#define SCRATCH_REG1 0x8504 +#define SCRATCH_REG2 0x8508 +#define SCRATCH_REG3 0x850C +#define SCRATCH_REG4 0x8510 +#define SCRATCH_REG5 0x8514 +#define SCRATCH_REG6 0x8518 +#define SCRATCH_REG7 0x851C +#define SCRATCH_UMSK 0x8540 +#define SCRATCH_ADDR 0x8544 + +#define SMX_DC_CTL0 0xA020 +#define USE_HASH_FUNCTION (1 << 0) +#define NUMBER_OF_SETS(x) ((x) << 1) +#define FLUSH_ALL_ON_EVENT (1 << 10) +#define STALL_ON_EVENT (1 << 11) +#define SMX_EVENT_CTL 0xA02C +#define ES_FLUSH_CTL(x) ((x) << 0) +#define GS_FLUSH_CTL(x) ((x) << 3) +#define ACK_FLUSH_CTL(x) ((x) << 6) +#define SYNC_FLUSH_CTL (1 << 8) + +#define SPI_CONFIG_CNTL 0x9100 +#define GPR_WRITE_PRIORITY(x) ((x) << 0) +#define SPI_CONFIG_CNTL_1 0x913C +#define VTX_DONE_DELAY(x) ((x) << 0) +#define INTERP_ONE_PRIM_PER_ROW (1 << 4) +#define SPI_INPUT_Z 0x286D8 +#define SPI_PS_IN_CONTROL_0 0x286CC +#define NUM_INTERP(x) ((x)<<0) +#define POSITION_ENA (1<<8) +#define POSITION_CENTROID (1<<9) +#define POSITION_ADDR(x) ((x)<<10) +#define PARAM_GEN(x) ((x)<<15) +#define PARAM_GEN_ADDR(x) ((x)<<19) +#define BARYC_SAMPLE_CNTL(x) ((x)<<26) +#define PERSP_GRADIENT_ENA (1<<28) +#define LINEAR_GRADIENT_ENA (1<<29) +#define POSITION_SAMPLE (1<<30) +#define BARYC_AT_SAMPLE_ENA (1<<31) + +#define SQ_CONFIG 0x8C00 +#define VC_ENABLE (1 << 0) +#define EXPORT_SRC_C (1 << 1) +#define CS_PRIO(x) ((x) << 18) +#define LS_PRIO(x) ((x) << 20) +#define HS_PRIO(x) ((x) << 22) +#define PS_PRIO(x) ((x) << 24) +#define VS_PRIO(x) ((x) << 26) +#define GS_PRIO(x) ((x) << 28) +#define ES_PRIO(x) ((x) << 30) +#define SQ_GPR_RESOURCE_MGMT_1 0x8C04 +#define NUM_PS_GPRS(x) ((x) << 0) +#define NUM_VS_GPRS(x) ((x) << 16) +#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) +#define SQ_GPR_RESOURCE_MGMT_2 0x8C08 +#define NUM_GS_GPRS(x) ((x) << 0) +#define NUM_ES_GPRS(x) ((x) << 16) +#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C +#define NUM_HS_GPRS(x) ((x) << 0) +#define NUM_LS_GPRS(x) ((x) << 16) +#define SQ_THREAD_RESOURCE_MGMT 0x8C18 +#define NUM_PS_THREADS(x) ((x) << 0) +#define NUM_VS_THREADS(x) ((x) << 8) +#define NUM_GS_THREADS(x) ((x) << 16) +#define NUM_ES_THREADS(x) ((x) << 24) +#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C +#define NUM_HS_THREADS(x) ((x) << 0) +#define NUM_LS_THREADS(x) ((x) << 8) +#define SQ_STACK_RESOURCE_MGMT_1 0x8C20 +#define 
NUM_PS_STACK_ENTRIES(x) ((x) << 0) +#define NUM_VS_STACK_ENTRIES(x) ((x) << 16) +#define SQ_STACK_RESOURCE_MGMT_2 0x8C24 +#define NUM_GS_STACK_ENTRIES(x) ((x) << 0) +#define NUM_ES_STACK_ENTRIES(x) ((x) << 16) +#define SQ_STACK_RESOURCE_MGMT_3 0x8C28 +#define NUM_HS_STACK_ENTRIES(x) ((x) << 0) +#define NUM_LS_STACK_ENTRIES(x) ((x) << 16) +#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C +#define SQ_LDS_RESOURCE_MGMT 0x8E2C + +#define SQ_MS_FIFO_SIZES 0x8CF0 +#define CACHE_FIFO_SIZE(x) ((x) << 0) +#define FETCH_FIFO_HIWATER(x) ((x) << 8) +#define DONE_FIFO_HIWATER(x) ((x) << 16) +#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) + +#define SX_DEBUG_1 0x9058 +#define ENABLE_NEW_SMX_ADDRESS (1 << 16) +#define SX_EXPORT_BUFFER_SIZES 0x900C +#define COLOR_BUFFER_SIZE(x) ((x) << 0) +#define POSITION_BUFFER_SIZE(x) ((x) << 8) +#define SMX_BUFFER_SIZE(x) ((x) << 16) +#define SX_MISC 0x28350 + +#define CB_PERF_CTR0_SEL_0 0x9A20 +#define CB_PERF_CTR0_SEL_1 0x9A24 +#define CB_PERF_CTR1_SEL_0 0x9A28 +#define CB_PERF_CTR1_SEL_1 0x9A2C +#define CB_PERF_CTR2_SEL_0 0x9A30 +#define CB_PERF_CTR2_SEL_1 0x9A34 +#define CB_PERF_CTR3_SEL_0 0x9A38 +#define CB_PERF_CTR3_SEL_1 0x9A3C + +#define TA_CNTL_AUX 0x9508 +#define DISABLE_CUBE_WRAP (1 << 0) +#define DISABLE_CUBE_ANISO (1 << 1) +#define SYNC_GRADIENT (1 << 24) +#define SYNC_WALKER (1 << 25) +#define SYNC_ALIGNER (1 << 26) + +#define VGT_CACHE_INVALIDATION 0x88C4 +#define CACHE_INVALIDATION(x) ((x) << 0) +#define VC_ONLY 0 +#define TC_ONLY 1 +#define VC_AND_TC 2 +#define AUTO_INVLD_EN(x) ((x) << 6) +#define NO_AUTO 0 +#define ES_AUTO 1 +#define GS_AUTO 2 +#define ES_AND_GS_AUTO 3 +#define VGT_GS_VERTEX_REUSE 0x88D4 +#define VGT_NUM_INSTANCES 0x8974 +#define VGT_OUT_DEALLOC_CNTL 0x28C5C +#define DEALLOC_DIST_MASK 0x0000007F +#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 +#define VTX_REUSE_DEPTH_MASK 0x000000FF + +#define VM_CONTEXT0_CNTL 0x1410 +#define ENABLE_CONTEXT (1 << 0) +#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) +#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) +#define VM_CONTEXT1_CNTL 0x1414 +#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C +#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C +#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C +#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 +#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 +#define REQUEST_TYPE(x) (((x) & 0xf) << 0) +#define RESPONSE_TYPE_MASK 0x000000F0 +#define RESPONSE_TYPE_SHIFT 4 +#define VM_L2_CNTL 0x1400 +#define ENABLE_L2_CACHE (1 << 0) +#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) +#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) +#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14) +#define VM_L2_CNTL2 0x1404 +#define INVALIDATE_ALL_L1_TLBS (1 << 0) +#define INVALIDATE_L2_CACHE (1 << 1) +#define VM_L2_CNTL3 0x1408 +#define BANK_SELECT(x) ((x) << 0) +#define CACHE_UPDATE_MODE(x) ((x) << 6) +#define VM_L2_STATUS 0x140C +#define L2_BUSY (1 << 0) + +#define WAIT_UNTIL 0x8040 + +#define SRBM_STATUS 0x0E50 +#define SRBM_SOFT_RESET 0x0E60 +#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6 +#define SOFT_RESET_BIF (1 << 1) +#define SOFT_RESET_CG (1 << 2) +#define SOFT_RESET_DC (1 << 5) +#define SOFT_RESET_GRBM (1 << 8) +#define SOFT_RESET_HDP (1 << 9) +#define SOFT_RESET_IH (1 << 10) +#define SOFT_RESET_MC (1 << 11) +#define SOFT_RESET_RLC (1 << 13) +#define SOFT_RESET_ROM (1 << 14) +#define SOFT_RESET_SEM (1 << 15) +#define SOFT_RESET_VMC (1 << 17) +#define SOFT_RESET_TST (1 << 21) +#define SOFT_RESET_REGBB (1 << 22) +#define SOFT_RESET_ORB (1 << 23) + +#define IH_RB_CNTL 0x3e00 +# 
define IH_RB_ENABLE (1 << 0) +# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ +# define IH_RB_FULL_DRAIN_ENABLE (1 << 6) +# define IH_WPTR_WRITEBACK_ENABLE (1 << 8) +# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ +# define IH_WPTR_OVERFLOW_ENABLE (1 << 16) +# define IH_WPTR_OVERFLOW_CLEAR (1 << 31) +#define IH_RB_BASE 0x3e04 +#define IH_RB_RPTR 0x3e08 +#define IH_RB_WPTR 0x3e0c +# define RB_OVERFLOW (1 << 0) +# define WPTR_OFFSET_MASK 0x3fffc +#define IH_RB_WPTR_ADDR_HI 0x3e10 +#define IH_RB_WPTR_ADDR_LO 0x3e14 +#define IH_CNTL 0x3e18 +# define ENABLE_INTR (1 << 0) +# define IH_MC_SWAP(x) ((x) << 2) +# define IH_MC_SWAP_NONE 0 +# define IH_MC_SWAP_16BIT 1 +# define IH_MC_SWAP_32BIT 2 +# define IH_MC_SWAP_64BIT 3 +# define RPTR_REARM (1 << 4) +# define MC_WRREQ_CREDIT(x) ((x) << 15) +# define MC_WR_CLEAN_CNT(x) ((x) << 20) + +#define CP_INT_CNTL 0xc124 +# define CNTX_BUSY_INT_ENABLE (1 << 19) +# define CNTX_EMPTY_INT_ENABLE (1 << 20) +# define SCRATCH_INT_ENABLE (1 << 25) +# define TIME_STAMP_INT_ENABLE (1 << 26) +# define IB2_INT_ENABLE (1 << 29) +# define IB1_INT_ENABLE (1 << 30) +# define RB_INT_ENABLE (1 << 31) +#define CP_INT_STATUS 0xc128 +# define SCRATCH_INT_STAT (1 << 25) +# define TIME_STAMP_INT_STAT (1 << 26) +# define IB2_INT_STAT (1 << 29) +# define IB1_INT_STAT (1 << 30) +# define RB_INT_STAT (1 << 31) + +#define GRBM_INT_CNTL 0x8060 +# define RDERR_INT_ENABLE (1 << 0) +# define GUI_IDLE_INT_ENABLE (1 << 19) + +/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ +#define CRTC_STATUS_FRAME_COUNT 0x6e98 + +/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */ +#define VLINE_STATUS 0x6bb8 +# define VLINE_OCCURRED (1 << 0) +# define VLINE_ACK (1 << 4) +# define VLINE_STAT (1 << 12) +# define VLINE_INTERRUPT (1 << 16) +# define VLINE_INTERRUPT_TYPE (1 << 17) +/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */ +#define VBLANK_STATUS 0x6bbc +# define VBLANK_OCCURRED (1 << 0) +# define VBLANK_ACK (1 << 4) +# define VBLANK_STAT (1 << 12) +# define VBLANK_INTERRUPT (1 << 16) +# define VBLANK_INTERRUPT_TYPE (1 << 17) + +/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */ +#define INT_MASK 0x6b40 +# define VBLANK_INT_MASK (1 << 0) +# define VLINE_INT_MASK (1 << 4) + +#define DISP_INTERRUPT_STATUS 0x60f4 +# define LB_D1_VLINE_INTERRUPT (1 << 2) +# define LB_D1_VBLANK_INTERRUPT (1 << 3) +# define DC_HPD1_INTERRUPT (1 << 17) +# define DC_HPD1_RX_INTERRUPT (1 << 18) +# define DACA_AUTODETECT_INTERRUPT (1 << 22) +# define DACB_AUTODETECT_INTERRUPT (1 << 23) +# define DC_I2C_SW_DONE_INTERRUPT (1 << 24) +# define DC_I2C_HW_DONE_INTERRUPT (1 << 25) +#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8 +# define LB_D2_VLINE_INTERRUPT (1 << 2) +# define LB_D2_VBLANK_INTERRUPT (1 << 3) +# define DC_HPD2_INTERRUPT (1 << 17) +# define DC_HPD2_RX_INTERRUPT (1 << 18) +# define DISP_TIMER_INTERRUPT (1 << 24) +#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc +# define LB_D3_VLINE_INTERRUPT (1 << 2) +# define LB_D3_VBLANK_INTERRUPT (1 << 3) +# define DC_HPD3_INTERRUPT (1 << 17) +# define DC_HPD3_RX_INTERRUPT (1 << 18) +#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100 +# define LB_D4_VLINE_INTERRUPT (1 << 2) +# define LB_D4_VBLANK_INTERRUPT (1 << 3) +# define DC_HPD4_INTERRUPT (1 << 17) +# define DC_HPD4_RX_INTERRUPT (1 << 18) +#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c +# define LB_D5_VLINE_INTERRUPT (1 << 2) +# define LB_D5_VBLANK_INTERRUPT (1 << 3) +# define DC_HPD5_INTERRUPT (1 << 17) +# define DC_HPD5_RX_INTERRUPT (1 << 18) +#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150 +# define
LB_D6_VLINE_INTERRUPT (1 << 2) +# define LB_D6_VBLANK_INTERRUPT (1 << 3) +# define DC_HPD6_INTERRUPT (1 << 17) +# define DC_HPD6_RX_INTERRUPT (1 << 18) + +/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ +#define GRPH_INT_STATUS 0x6858 +# define GRPH_PFLIP_INT_OCCURRED (1 << 0) +# define GRPH_PFLIP_INT_CLEAR (1 << 8) +/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ +#define GRPH_INT_CONTROL 0x685c +# define GRPH_PFLIP_INT_MASK (1 << 0) +# define GRPH_PFLIP_INT_TYPE (1 << 8) + +#define DACA_AUTODETECT_INT_CONTROL 0x66c8 +#define DACB_AUTODETECT_INT_CONTROL 0x67c8 + +#define DC_HPD1_INT_STATUS 0x601c +#define DC_HPD2_INT_STATUS 0x6028 +#define DC_HPD3_INT_STATUS 0x6034 +#define DC_HPD4_INT_STATUS 0x6040 +#define DC_HPD5_INT_STATUS 0x604c +#define DC_HPD6_INT_STATUS 0x6058 +# define DC_HPDx_INT_STATUS (1 << 0) +# define DC_HPDx_SENSE (1 << 1) +# define DC_HPDx_RX_INT_STATUS (1 << 8) + +#define DC_HPD1_INT_CONTROL 0x6020 +#define DC_HPD2_INT_CONTROL 0x602c +#define DC_HPD3_INT_CONTROL 0x6038 +#define DC_HPD4_INT_CONTROL 0x6044 +#define DC_HPD5_INT_CONTROL 0x6050 +#define DC_HPD6_INT_CONTROL 0x605c +# define DC_HPDx_INT_ACK (1 << 0) +# define DC_HPDx_INT_POLARITY (1 << 8) +# define DC_HPDx_INT_EN (1 << 16) +# define DC_HPDx_RX_INT_ACK (1 << 20) +# define DC_HPDx_RX_INT_EN (1 << 24) + +#define DC_HPD1_CONTROL 0x6024 +#define DC_HPD2_CONTROL 0x6030 +#define DC_HPD3_CONTROL 0x603c +#define DC_HPD4_CONTROL 0x6048 +#define DC_HPD5_CONTROL 0x6054 +#define DC_HPD6_CONTROL 0x6060 +# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) +# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) +# define DC_HPDx_EN (1 << 28) + +/* + * PM4 + */ +#define PACKET_TYPE0 0 +#define PACKET_TYPE1 1 +#define PACKET_TYPE2 2 +#define PACKET_TYPE3 3 + +#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) +#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) +#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) +#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) +#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ + (((reg) >> 2) & 0xFFFF) | \ + ((n) & 0x3FFF) << 16) +#define CP_PACKET2 0x80000000 +#define PACKET2_PAD_SHIFT 0 +#define PACKET2_PAD_MASK (0x3fffffff << 0) + +#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) + +#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ + (((op) & 0xFF) << 8) | \ + ((n) & 0x3FFF) << 16) + +/* Packet 3 types */ +#define PACKET3_NOP 0x10 +#define PACKET3_SET_BASE 0x11 +#define PACKET3_CLEAR_STATE 0x12 +#define PACKET3_INDIRECT_BUFFER_SIZE 0x13 +#define PACKET3_DISPATCH_DIRECT 0x15 +#define PACKET3_DISPATCH_INDIRECT 0x16 +#define PACKET3_INDIRECT_BUFFER_END 0x17 +#define PACKET3_SET_PREDICATION 0x20 +#define PACKET3_REG_RMW 0x21 +#define PACKET3_COND_EXEC 0x22 +#define PACKET3_PRED_EXEC 0x23 +#define PACKET3_DRAW_INDIRECT 0x24 +#define PACKET3_DRAW_INDEX_INDIRECT 0x25 +#define PACKET3_INDEX_BASE 0x26 +#define PACKET3_DRAW_INDEX_2 0x27 +#define PACKET3_CONTEXT_CONTROL 0x28 +#define PACKET3_DRAW_INDEX_OFFSET 0x29 +#define PACKET3_INDEX_TYPE 0x2A +#define PACKET3_DRAW_INDEX 0x2B +#define PACKET3_DRAW_INDEX_AUTO 0x2D +#define PACKET3_DRAW_INDEX_IMMD 0x2E +#define PACKET3_NUM_INSTANCES 0x2F +#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 +#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 +#define PACKET3_DRAW_INDEX_OFFSET_2 0x35 +#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 +#define PACKET3_MEM_SEMAPHORE 0x39 +#define PACKET3_MPEG_INDEX 0x3A +#define PACKET3_WAIT_REG_MEM 0x3C +#define PACKET3_MEM_WRITE 0x3D +#define PACKET3_INDIRECT_BUFFER 0x32 +#define PACKET3_SURFACE_SYNC 0x43 +# define 
PACKET3_CB0_DEST_BASE_ENA (1 << 6) +# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) +# define PACKET3_CB2_DEST_BASE_ENA (1 << 8) +# define PACKET3_CB3_DEST_BASE_ENA (1 << 9) +# define PACKET3_CB4_DEST_BASE_ENA (1 << 10) +# define PACKET3_CB5_DEST_BASE_ENA (1 << 11) +# define PACKET3_CB6_DEST_BASE_ENA (1 << 12) +# define PACKET3_CB7_DEST_BASE_ENA (1 << 13) +# define PACKET3_DB_DEST_BASE_ENA (1 << 14) +# define PACKET3_CB8_DEST_BASE_ENA (1 << 15) +# define PACKET3_CB9_DEST_BASE_ENA (1 << 16) +# define PACKET3_CB10_DEST_BASE_ENA (1 << 17) +# define PACKET3_CB11_DEST_BASE_ENA (1 << 18) +# define PACKET3_FULL_CACHE_ENA (1 << 20) +# define PACKET3_TC_ACTION_ENA (1 << 23) +# define PACKET3_VC_ACTION_ENA (1 << 24) +# define PACKET3_CB_ACTION_ENA (1 << 25) +# define PACKET3_DB_ACTION_ENA (1 << 26) +# define PACKET3_SH_ACTION_ENA (1 << 27) +# define PACKET3_SMX_ACTION_ENA (1 << 28) +#define PACKET3_ME_INITIALIZE 0x44 +#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) +#define PACKET3_COND_WRITE 0x45 +#define PACKET3_EVENT_WRITE 0x46 +#define PACKET3_EVENT_WRITE_EOP 0x47 +#define PACKET3_EVENT_WRITE_EOS 0x48 +#define PACKET3_PREAMBLE_CNTL 0x4A +#define PACKET3_RB_OFFSET 0x4B +#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C +#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D +#define PACKET3_ALU_PS_CONST_UPDATE 0x4E +#define PACKET3_ALU_VS_CONST_UPDATE 0x4F +#define PACKET3_ONE_REG_WRITE 0x57 +#define PACKET3_SET_CONFIG_REG 0x68 +#define PACKET3_SET_CONFIG_REG_START 0x00008000 +#define PACKET3_SET_CONFIG_REG_END 0x0000ac00 +#define PACKET3_SET_CONTEXT_REG 0x69 +#define PACKET3_SET_CONTEXT_REG_START 0x00028000 +#define PACKET3_SET_CONTEXT_REG_END 0x00029000 +#define PACKET3_SET_ALU_CONST 0x6A +/* alu const buffers only; no reg file */ +#define PACKET3_SET_BOOL_CONST 0x6B +#define PACKET3_SET_BOOL_CONST_START 0x0003a500 +#define PACKET3_SET_BOOL_CONST_END 0x0003a518 +#define PACKET3_SET_LOOP_CONST 0x6C +#define PACKET3_SET_LOOP_CONST_START 0x0003a200 +#define PACKET3_SET_LOOP_CONST_END 0x0003a500 +#define PACKET3_SET_RESOURCE 0x6D +#define PACKET3_SET_RESOURCE_START 0x00030000 +#define PACKET3_SET_RESOURCE_END 0x00038000 +#define PACKET3_SET_SAMPLER 0x6E +#define PACKET3_SET_SAMPLER_START 0x0003c000 +#define PACKET3_SET_SAMPLER_END 0x0003c600 +#define PACKET3_SET_CTL_CONST 0x6F +#define PACKET3_SET_CTL_CONST_START 0x0003cff0 +#define PACKET3_SET_CTL_CONST_END 0x0003ff0c +#define PACKET3_SET_RESOURCE_OFFSET 0x70 +#define PACKET3_SET_ALU_CONST_VS 0x71 +#define PACKET3_SET_ALU_CONST_DI 0x72 +#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 +#define PACKET3_SET_RESOURCE_INDIRECT 0x74 +#define PACKET3_SET_APPEND_CNT 0x75 + +#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c +#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30) +#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3) +#define SQ_TEX_VTX_INVALID_TEXTURE 0x0 +#define SQ_TEX_VTX_INVALID_BUFFER 0x1 +#define SQ_TEX_VTX_VALID_TEXTURE 0x2 +#define SQ_TEX_VTX_VALID_BUFFER 0x3 + +#define SQ_CONST_MEM_BASE 0x8df8 + +#define SQ_ESGS_RING_SIZE 0x8c44 +#define SQ_GSVS_RING_SIZE 0x8c4c +#define SQ_ESTMP_RING_SIZE 0x8c54 +#define SQ_GSTMP_RING_SIZE 0x8c5c +#define SQ_VSTMP_RING_SIZE 0x8c64 +#define SQ_PSTMP_RING_SIZE 0x8c6c +#define SQ_LSTMP_RING_SIZE 0x8e14 +#define SQ_HSTMP_RING_SIZE 0x8e1c +#define VGT_TF_RING_SIZE 0x8988 + +#define SQ_ESGS_RING_ITEMSIZE 0x28900 +#define SQ_GSVS_RING_ITEMSIZE 0x28904 +#define SQ_ESTMP_RING_ITEMSIZE 0x28908 +#define SQ_GSTMP_RING_ITEMSIZE 0x2890c +#define SQ_VSTMP_RING_ITEMSIZE 0x28910 +#define SQ_PSTMP_RING_ITEMSIZE 0x28914 +#define
SQ_LSTMP_RING_ITEMSIZE 0x28830 +#define SQ_HSTMP_RING_ITEMSIZE 0x28834 + +#define SQ_GS_VERT_ITEMSIZE 0x2891c +#define SQ_GS_VERT_ITEMSIZE_1 0x28920 +#define SQ_GS_VERT_ITEMSIZE_2 0x28924 +#define SQ_GS_VERT_ITEMSIZE_3 0x28928 +#define SQ_GSVS_RING_OFFSET_1 0x2892c +#define SQ_GSVS_RING_OFFSET_2 0x28930 +#define SQ_GSVS_RING_OFFSET_3 0x28934 + +#define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140 +#define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80 + +#define SQ_ALU_CONST_CACHE_PS_0 0x28940 +#define SQ_ALU_CONST_CACHE_PS_1 0x28944 +#define SQ_ALU_CONST_CACHE_PS_2 0x28948 +#define SQ_ALU_CONST_CACHE_PS_3 0x2894c +#define SQ_ALU_CONST_CACHE_PS_4 0x28950 +#define SQ_ALU_CONST_CACHE_PS_5 0x28954 +#define SQ_ALU_CONST_CACHE_PS_6 0x28958 +#define SQ_ALU_CONST_CACHE_PS_7 0x2895c +#define SQ_ALU_CONST_CACHE_PS_8 0x28960 +#define SQ_ALU_CONST_CACHE_PS_9 0x28964 +#define SQ_ALU_CONST_CACHE_PS_10 0x28968 +#define SQ_ALU_CONST_CACHE_PS_11 0x2896c +#define SQ_ALU_CONST_CACHE_PS_12 0x28970 +#define SQ_ALU_CONST_CACHE_PS_13 0x28974 +#define SQ_ALU_CONST_CACHE_PS_14 0x28978 +#define SQ_ALU_CONST_CACHE_PS_15 0x2897c +#define SQ_ALU_CONST_CACHE_VS_0 0x28980 +#define SQ_ALU_CONST_CACHE_VS_1 0x28984 +#define SQ_ALU_CONST_CACHE_VS_2 0x28988 +#define SQ_ALU_CONST_CACHE_VS_3 0x2898c +#define SQ_ALU_CONST_CACHE_VS_4 0x28990 +#define SQ_ALU_CONST_CACHE_VS_5 0x28994 +#define SQ_ALU_CONST_CACHE_VS_6 0x28998 +#define SQ_ALU_CONST_CACHE_VS_7 0x2899c +#define SQ_ALU_CONST_CACHE_VS_8 0x289a0 +#define SQ_ALU_CONST_CACHE_VS_9 0x289a4 +#define SQ_ALU_CONST_CACHE_VS_10 0x289a8 +#define SQ_ALU_CONST_CACHE_VS_11 0x289ac +#define SQ_ALU_CONST_CACHE_VS_12 0x289b0 +#define SQ_ALU_CONST_CACHE_VS_13 0x289b4 +#define SQ_ALU_CONST_CACHE_VS_14 0x289b8 +#define SQ_ALU_CONST_CACHE_VS_15 0x289bc +#define SQ_ALU_CONST_CACHE_GS_0 0x289c0 +#define SQ_ALU_CONST_CACHE_GS_1 0x289c4 +#define SQ_ALU_CONST_CACHE_GS_2 0x289c8 +#define SQ_ALU_CONST_CACHE_GS_3 0x289cc +#define SQ_ALU_CONST_CACHE_GS_4 0x289d0 +#define SQ_ALU_CONST_CACHE_GS_5 0x289d4 +#define SQ_ALU_CONST_CACHE_GS_6 0x289d8 +#define SQ_ALU_CONST_CACHE_GS_7 0x289dc +#define SQ_ALU_CONST_CACHE_GS_8 0x289e0 +#define SQ_ALU_CONST_CACHE_GS_9 0x289e4 +#define SQ_ALU_CONST_CACHE_GS_10 0x289e8 +#define SQ_ALU_CONST_CACHE_GS_11 0x289ec +#define SQ_ALU_CONST_CACHE_GS_12 0x289f0 +#define SQ_ALU_CONST_CACHE_GS_13 0x289f4 +#define SQ_ALU_CONST_CACHE_GS_14 0x289f8 +#define SQ_ALU_CONST_CACHE_GS_15 0x289fc +#define SQ_ALU_CONST_CACHE_HS_0 0x28f00 +#define SQ_ALU_CONST_CACHE_HS_1 0x28f04 +#define SQ_ALU_CONST_CACHE_HS_2 0x28f08 +#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c +#define SQ_ALU_CONST_CACHE_HS_4 0x28f10 +#define SQ_ALU_CONST_CACHE_HS_5 0x28f14 +#define SQ_ALU_CONST_CACHE_HS_6 0x28f18 +#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c +#define SQ_ALU_CONST_CACHE_HS_8 0x28f20 +#define SQ_ALU_CONST_CACHE_HS_9 0x28f24 +#define SQ_ALU_CONST_CACHE_HS_10 0x28f28 +#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c +#define SQ_ALU_CONST_CACHE_HS_12 0x28f30 +#define SQ_ALU_CONST_CACHE_HS_13 0x28f34 +#define SQ_ALU_CONST_CACHE_HS_14 0x28f38 +#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c +#define SQ_ALU_CONST_CACHE_LS_0 0x28f40 +#define SQ_ALU_CONST_CACHE_LS_1 0x28f44 +#define SQ_ALU_CONST_CACHE_LS_2 0x28f48 +#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c +#define SQ_ALU_CONST_CACHE_LS_4 0x28f50 +#define SQ_ALU_CONST_CACHE_LS_5 0x28f54 +#define SQ_ALU_CONST_CACHE_LS_6 0x28f58 +#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c +#define SQ_ALU_CONST_CACHE_LS_8 0x28f60 +#define SQ_ALU_CONST_CACHE_LS_9 0x28f64 +#define SQ_ALU_CONST_CACHE_LS_10 0x28f68 +#define 
SQ_ALU_CONST_CACHE_LS_11 0x28f6c +#define SQ_ALU_CONST_CACHE_LS_12 0x28f70 +#define SQ_ALU_CONST_CACHE_LS_13 0x28f74 +#define SQ_ALU_CONST_CACHE_LS_14 0x28f78 +#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c + +#define DB_DEPTH_CONTROL 0x28800 +#define DB_DEPTH_VIEW 0x28008 +#define DB_HTILE_DATA_BASE 0x28014 +#define DB_Z_INFO 0x28040 +# define Z_ARRAY_MODE(x) ((x) << 4) +#define DB_STENCIL_INFO 0x28044 +#define DB_Z_READ_BASE 0x28048 +#define DB_STENCIL_READ_BASE 0x2804c +#define DB_Z_WRITE_BASE 0x28050 +#define DB_STENCIL_WRITE_BASE 0x28054 +#define DB_DEPTH_SIZE 0x28058 + +#define SQ_PGM_START_PS 0x28840 +#define SQ_PGM_START_VS 0x2885c +#define SQ_PGM_START_GS 0x28874 +#define SQ_PGM_START_ES 0x2888c +#define SQ_PGM_START_FS 0x288a4 +#define SQ_PGM_START_HS 0x288b8 +#define SQ_PGM_START_LS 0x288d0 + +#define VGT_STRMOUT_CONFIG 0x28b94 +#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 + +#define CB_TARGET_MASK 0x28238 +#define CB_SHADER_MASK 0x2823c + +#define GDS_ADDR_BASE 0x28720 + +#define CB_IMMED0_BASE 0x28b9c +#define CB_IMMED1_BASE 0x28ba0 +#define CB_IMMED2_BASE 0x28ba4 +#define CB_IMMED3_BASE 0x28ba8 +#define CB_IMMED4_BASE 0x28bac +#define CB_IMMED5_BASE 0x28bb0 +#define CB_IMMED6_BASE 0x28bb4 +#define CB_IMMED7_BASE 0x28bb8 +#define CB_IMMED8_BASE 0x28bbc +#define CB_IMMED9_BASE 0x28bc0 +#define CB_IMMED10_BASE 0x28bc4 +#define CB_IMMED11_BASE 0x28bc8 + +/* all 12 CB blocks have these regs */ +#define CB_COLOR0_BASE 0x28c60 +#define CB_COLOR0_PITCH 0x28c64 +#define CB_COLOR0_SLICE 0x28c68 +#define CB_COLOR0_VIEW 0x28c6c +#define CB_COLOR0_INFO 0x28c70 +# define CB_ARRAY_MODE(x) ((x) << 8) +# define ARRAY_LINEAR_GENERAL 0 +# define ARRAY_LINEAR_ALIGNED 1 +# define ARRAY_1D_TILED_THIN1 2 +# define ARRAY_2D_TILED_THIN1 4 +#define CB_COLOR0_ATTRIB 0x28c74 +#define CB_COLOR0_DIM 0x28c78 +/* only CB0-7 blocks have these regs */ +#define CB_COLOR0_CMASK 0x28c7c +#define CB_COLOR0_CMASK_SLICE 0x28c80 +#define CB_COLOR0_FMASK 0x28c84 +#define CB_COLOR0_FMASK_SLICE 0x28c88 +#define CB_COLOR0_CLEAR_WORD0 0x28c8c +#define CB_COLOR0_CLEAR_WORD1 0x28c90 +#define CB_COLOR0_CLEAR_WORD2 0x28c94 +#define CB_COLOR0_CLEAR_WORD3 0x28c98 + +#define CB_COLOR1_BASE 0x28c9c +#define CB_COLOR2_BASE 0x28cd8 +#define CB_COLOR3_BASE 0x28d14 +#define CB_COLOR4_BASE 0x28d50 +#define CB_COLOR5_BASE 0x28d8c +#define CB_COLOR6_BASE 0x28dc8 +#define CB_COLOR7_BASE 0x28e04 +#define CB_COLOR8_BASE 0x28e40 +#define CB_COLOR9_BASE 0x28e5c +#define CB_COLOR10_BASE 0x28e78 +#define CB_COLOR11_BASE 0x28e94 + +#define CB_COLOR1_PITCH 0x28ca0 +#define CB_COLOR2_PITCH 0x28cdc +#define CB_COLOR3_PITCH 0x28d18 +#define CB_COLOR4_PITCH 0x28d54 +#define CB_COLOR5_PITCH 0x28d90 +#define CB_COLOR6_PITCH 0x28dcc +#define CB_COLOR7_PITCH 0x28e08 +#define CB_COLOR8_PITCH 0x28e44 +#define CB_COLOR9_PITCH 0x28e60 +#define CB_COLOR10_PITCH 0x28e7c +#define CB_COLOR11_PITCH 0x28e98 + +#define CB_COLOR1_SLICE 0x28ca4 +#define CB_COLOR2_SLICE 0x28ce0 +#define CB_COLOR3_SLICE 0x28d1c +#define CB_COLOR4_SLICE 0x28d58 +#define CB_COLOR5_SLICE 0x28d94 +#define CB_COLOR6_SLICE 0x28dd0 +#define CB_COLOR7_SLICE 0x28e0c +#define CB_COLOR8_SLICE 0x28e48 +#define CB_COLOR9_SLICE 0x28e64 +#define CB_COLOR10_SLICE 0x28e80 +#define CB_COLOR11_SLICE 0x28e9c + +#define CB_COLOR1_VIEW 0x28ca8 +#define CB_COLOR2_VIEW 0x28ce4 +#define CB_COLOR3_VIEW 0x28d20 +#define CB_COLOR4_VIEW 0x28d5c +#define CB_COLOR5_VIEW 0x28d98 +#define CB_COLOR6_VIEW 0x28dd4 +#define CB_COLOR7_VIEW 0x28e10 +#define CB_COLOR8_VIEW 0x28e4c +#define CB_COLOR9_VIEW 0x28e68 +#define 
CB_COLOR10_VIEW 0x28e84 +#define CB_COLOR11_VIEW 0x28ea0 + +#define CB_COLOR1_INFO 0x28cac +#define CB_COLOR2_INFO 0x28ce8 +#define CB_COLOR3_INFO 0x28d24 +#define CB_COLOR4_INFO 0x28d60 +#define CB_COLOR5_INFO 0x28d9c +#define CB_COLOR6_INFO 0x28dd8 +#define CB_COLOR7_INFO 0x28e14 +#define CB_COLOR8_INFO 0x28e50 +#define CB_COLOR9_INFO 0x28e6c +#define CB_COLOR10_INFO 0x28e88 +#define CB_COLOR11_INFO 0x28ea4 + +#define CB_COLOR1_ATTRIB 0x28cb0 +#define CB_COLOR2_ATTRIB 0x28cec +#define CB_COLOR3_ATTRIB 0x28d28 +#define CB_COLOR4_ATTRIB 0x28d64 +#define CB_COLOR5_ATTRIB 0x28da0 +#define CB_COLOR6_ATTRIB 0x28ddc +#define CB_COLOR7_ATTRIB 0x28e18 +#define CB_COLOR8_ATTRIB 0x28e54 +#define CB_COLOR9_ATTRIB 0x28e70 +#define CB_COLOR10_ATTRIB 0x28e8c +#define CB_COLOR11_ATTRIB 0x28ea8 + +#define CB_COLOR1_DIM 0x28cb4 +#define CB_COLOR2_DIM 0x28cf0 +#define CB_COLOR3_DIM 0x28d2c +#define CB_COLOR4_DIM 0x28d68 +#define CB_COLOR5_DIM 0x28da4 +#define CB_COLOR6_DIM 0x28de0 +#define CB_COLOR7_DIM 0x28e1c +#define CB_COLOR8_DIM 0x28e58 +#define CB_COLOR9_DIM 0x28e74 +#define CB_COLOR10_DIM 0x28e90 +#define CB_COLOR11_DIM 0x28eac + +#define CB_COLOR1_CMASK 0x28cb8 +#define CB_COLOR2_CMASK 0x28cf4 +#define CB_COLOR3_CMASK 0x28d30 +#define CB_COLOR4_CMASK 0x28d6c +#define CB_COLOR5_CMASK 0x28da8 +#define CB_COLOR6_CMASK 0x28de4 +#define CB_COLOR7_CMASK 0x28e20 + +#define CB_COLOR1_CMASK_SLICE 0x28cbc +#define CB_COLOR2_CMASK_SLICE 0x28cf8 +#define CB_COLOR3_CMASK_SLICE 0x28d34 +#define CB_COLOR4_CMASK_SLICE 0x28d70 +#define CB_COLOR5_CMASK_SLICE 0x28dac +#define CB_COLOR6_CMASK_SLICE 0x28de8 +#define CB_COLOR7_CMASK_SLICE 0x28e24 + +#define CB_COLOR1_FMASK 0x28cc0 +#define CB_COLOR2_FMASK 0x28cfc +#define CB_COLOR3_FMASK 0x28d38 +#define CB_COLOR4_FMASK 0x28d74 +#define CB_COLOR5_FMASK 0x28db0 +#define CB_COLOR6_FMASK 0x28dec +#define CB_COLOR7_FMASK 0x28e28 + +#define CB_COLOR1_FMASK_SLICE 0x28cc4 +#define CB_COLOR2_FMASK_SLICE 0x28d00 +#define CB_COLOR3_FMASK_SLICE 0x28d3c +#define CB_COLOR4_FMASK_SLICE 0x28d78 +#define CB_COLOR5_FMASK_SLICE 0x28db4 +#define CB_COLOR6_FMASK_SLICE 0x28df0 +#define CB_COLOR7_FMASK_SLICE 0x28e2c + +#define CB_COLOR1_CLEAR_WORD0 0x28cc8 +#define CB_COLOR2_CLEAR_WORD0 0x28d04 +#define CB_COLOR3_CLEAR_WORD0 0x28d40 +#define CB_COLOR4_CLEAR_WORD0 0x28d7c +#define CB_COLOR5_CLEAR_WORD0 0x28db8 +#define CB_COLOR6_CLEAR_WORD0 0x28df4 +#define CB_COLOR7_CLEAR_WORD0 0x28e30 + +#define CB_COLOR1_CLEAR_WORD1 0x28ccc +#define CB_COLOR2_CLEAR_WORD1 0x28d08 +#define CB_COLOR3_CLEAR_WORD1 0x28d44 +#define CB_COLOR4_CLEAR_WORD1 0x28d80 +#define CB_COLOR5_CLEAR_WORD1 0x28dbc +#define CB_COLOR6_CLEAR_WORD1 0x28df8 +#define CB_COLOR7_CLEAR_WORD1 0x28e34 + +#define CB_COLOR1_CLEAR_WORD2 0x28cd0 +#define CB_COLOR2_CLEAR_WORD2 0x28d0c +#define CB_COLOR3_CLEAR_WORD2 0x28d48 +#define CB_COLOR4_CLEAR_WORD2 0x28d84 +#define CB_COLOR5_CLEAR_WORD2 0x28dc0 +#define CB_COLOR6_CLEAR_WORD2 0x28dfc +#define CB_COLOR7_CLEAR_WORD2 0x28e38 + +#define CB_COLOR1_CLEAR_WORD3 0x28cd4 +#define CB_COLOR2_CLEAR_WORD3 0x28d10 +#define CB_COLOR3_CLEAR_WORD3 0x28d4c +#define CB_COLOR4_CLEAR_WORD3 0x28d88 +#define CB_COLOR5_CLEAR_WORD3 0x28dc4 +#define CB_COLOR6_CLEAR_WORD3 0x28e00 +#define CB_COLOR7_CLEAR_WORD3 0x28e3c + +#define SQ_TEX_RESOURCE_WORD0_0 0x30000 +#define SQ_TEX_RESOURCE_WORD1_0 0x30004 +# define TEX_ARRAY_MODE(x) ((x) << 28) +#define SQ_TEX_RESOURCE_WORD2_0 0x30008 +#define SQ_TEX_RESOURCE_WORD3_0 0x3000C +#define SQ_TEX_RESOURCE_WORD4_0 0x30010 +#define SQ_TEX_RESOURCE_WORD5_0 0x30014 +#define 
SQ_TEX_RESOURCE_WORD6_0 0x30018 +#define SQ_TEX_RESOURCE_WORD7_0 0x3001c + + +#endif diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 91eb762eb3f9..a89a15ab524d 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -26,15 +26,18 @@ * Jerome Glisse */ #include <linux/seq_file.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "radeon_drm.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_asic.h" #include "r100d.h" #include "rs100d.h" #include "rv200d.h" #include "rv250d.h" +#include "atom.h" #include <linux/firmware.h> #include <linux/platform_device.h> @@ -65,6 +68,274 @@ MODULE_FIRMWARE(FIRMWARE_R520); * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ +void r100_pm_get_dynpm_state(struct radeon_device *rdev) +{ + int i; + rdev->pm.dynpm_can_upclock = true; + rdev->pm.dynpm_can_downclock = true; + + switch (rdev->pm.dynpm_planned_action) { + case DYNPM_ACTION_MINIMUM: + rdev->pm.requested_power_state_index = 0; + rdev->pm.dynpm_can_downclock = false; + break; + case DYNPM_ACTION_DOWNCLOCK: + if (rdev->pm.current_power_state_index == 0) { + rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; + rdev->pm.dynpm_can_downclock = false; + } else { + if (rdev->pm.active_crtc_count > 1) { + for (i = 0; i < rdev->pm.num_power_states; i++) { + if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) + continue; + else if (i >= rdev->pm.current_power_state_index) { + rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; + break; + } else { + rdev->pm.requested_power_state_index = i; + break; + } + } + } else + rdev->pm.requested_power_state_index = + rdev->pm.current_power_state_index - 1; + } + /* don't use the power state if crtcs are active and no display flag is set */ + if ((rdev->pm.active_crtc_count > 0) && + (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & + RADEON_PM_MODE_NO_DISPLAY)) { + rdev->pm.requested_power_state_index++; + } + break; + case DYNPM_ACTION_UPCLOCK: + if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { + rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; + rdev->pm.dynpm_can_upclock = false; + } else { + if (rdev->pm.active_crtc_count > 1) { + for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { + if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) + continue; + else if (i <= rdev->pm.current_power_state_index) { + rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; + break; + } else { + rdev->pm.requested_power_state_index = i; + break; + } + } + } else + rdev->pm.requested_power_state_index = + rdev->pm.current_power_state_index + 1; + } + break; + case DYNPM_ACTION_DEFAULT: + rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.dynpm_can_upclock = false; + break; + case DYNPM_ACTION_NONE: + default: + DRM_ERROR("Requested mode for not defined action\n"); + return; + } + /* only one clock mode per power state */ + rdev->pm.requested_clock_mode_index = 0; + + DRM_DEBUG("Requested: e: %d m: %d p: %d\n", + rdev->pm.power_state[rdev->pm.requested_power_state_index]. + clock_info[rdev->pm.requested_clock_mode_index].sclk, + rdev->pm.power_state[rdev->pm.requested_power_state_index]. + clock_info[rdev->pm.requested_clock_mode_index].mclk, + rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
+ pcie_lanes); +} + +void r100_pm_init_profile(struct radeon_device *rdev) +{ + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + /* low sh */ + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; + /* low mh */ + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; +} + +void r100_pm_misc(struct radeon_device *rdev) +{ + int requested_index = rdev->pm.requested_power_state_index; + struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; + struct radeon_voltage *voltage = &ps->clock_info[0].voltage; + u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; + + if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { + if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + tmp = RREG32(voltage->gpio.reg); + if (voltage->active_high) + tmp |= voltage->gpio.mask; + else + tmp &= ~(voltage->gpio.mask); + WREG32(voltage->gpio.reg, tmp); + if (voltage->delay) + udelay(voltage->delay); + } else { + tmp = RREG32(voltage->gpio.reg); + if (voltage->active_high) + tmp &= ~voltage->gpio.mask; + else + tmp |= voltage->gpio.mask; + WREG32(voltage->gpio.reg, tmp); + if (voltage->delay) + udelay(voltage->delay); + } + } + + sclk_cntl = RREG32_PLL(SCLK_CNTL); + sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); + sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); + sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); + sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); + if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { + sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; + if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) + sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; + else + sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; + if (ps->misc & 
ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) + sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0); + else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) + sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); + } else + sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; + + if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { + sclk_more_cntl |= IO_CG_VOLTAGE_DROP; + if (voltage->delay) { + sclk_more_cntl |= VOLTAGE_DROP_SYNC; + switch (voltage->delay) { + case 33: + sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); + break; + case 66: + sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); + break; + case 99: + sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); + break; + case 132: + sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); + break; + } + } else + sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; + } else + sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; + + if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) + sclk_cntl &= ~FORCE_HDP; + else + sclk_cntl |= FORCE_HDP; + + WREG32_PLL(SCLK_CNTL, sclk_cntl); + WREG32_PLL(SCLK_CNTL2, sclk_cntl2); + WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); + + /* set pcie lanes */ + if ((rdev->flags & RADEON_IS_PCIE) && + !(rdev->flags & RADEON_IS_IGP) && + rdev->asic->set_pcie_lanes && + (ps->pcie_lanes != + rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { + radeon_set_pcie_lanes(rdev, + ps->pcie_lanes); + DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); + } +} + +void r100_pm_prepare(struct radeon_device *rdev) +{ + struct drm_device *ddev = rdev->ddev; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + u32 tmp; + + /* disable any active CRTCs */ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + if (radeon_crtc->crtc_id) { + tmp = RREG32(RADEON_CRTC2_GEN_CNTL); + tmp |= RADEON_CRTC2_DISP_REQ_EN_B; + WREG32(RADEON_CRTC2_GEN_CNTL, tmp); + } else { + tmp = RREG32(RADEON_CRTC_GEN_CNTL); + tmp |= RADEON_CRTC_DISP_REQ_EN_B; + WREG32(RADEON_CRTC_GEN_CNTL, tmp); + } + } + } +} + +void r100_pm_finish(struct radeon_device *rdev) +{ + struct drm_device *ddev = rdev->ddev; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + u32 tmp; + + /* enable any active CRTCs */ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + if (radeon_crtc->crtc_id) { + tmp = RREG32(RADEON_CRTC2_GEN_CNTL); + tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; + WREG32(RADEON_CRTC2_GEN_CNTL, tmp); + } else { + tmp = RREG32(RADEON_CRTC_GEN_CNTL); + tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; + WREG32(RADEON_CRTC_GEN_CNTL, tmp); + } + } + } +} + +bool r100_gui_idle(struct radeon_device *rdev) +{ + if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) + return false; + else + return true; +} + /* hpd for digital panel detect/disconnect */ bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) { @@ -235,9 +506,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) void r100_pci_gart_fini(struct radeon_device *rdev) { + radeon_gart_fini(rdev); r100_pci_gart_disable(rdev); radeon_gart_table_ram_free(rdev); - radeon_gart_fini(rdev); } int r100_irq_set(struct radeon_device *rdev) @@ -252,6 +523,9 @@ int r100_irq_set(struct radeon_device *rdev) if (rdev->irq.sw_int) { tmp |= RADEON_SW_INT_ENABLE; } + if (rdev->irq.gui_idle) { + tmp |= RADEON_GUI_IDLE_MASK; + } if (rdev->irq.crtc_vblank_int[0]) { tmp |= RADEON_CRTC_VBLANK_MASK; } @@ -286,6 +560,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev) RADEON_CRTC_VBLANK_STAT | 
RADEON_CRTC2_VBLANK_STAT | RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; + /* the interrupt works, but the status bit is permanently asserted */ + if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { + if (!rdev->irq.gui_idle_acked) + irq_mask |= RADEON_GUI_IDLE_STAT; + } + if (irqs) { WREG32(RADEON_GEN_INT_STATUS, irqs); } @@ -297,6 +577,9 @@ int r100_irq_process(struct radeon_device *rdev) uint32_t status, msi_rearm; bool queue_hotplug = false; + /* reset gui idle ack. the status bit is broken */ + rdev->irq.gui_idle_acked = false; + status = r100_irq_ack(rdev); if (!status) { return IRQ_NONE; @@ -309,13 +592,21 @@ int r100_irq_process(struct radeon_device *rdev) if (status & RADEON_SW_INT_TEST) { radeon_fence_process(rdev); } + /* gui idle interrupt */ + if (status & RADEON_GUI_IDLE_STAT) { + rdev->irq.gui_idle_acked = true; + rdev->pm.gui_idle = true; + wake_up(&rdev->irq.idle_queue); + } /* Vertical blank interrupts */ if (status & RADEON_CRTC_VBLANK_STAT) { drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (status & RADEON_CRTC2_VBLANK_STAT) { drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (status & RADEON_FP_DETECT_STAT) { @@ -328,6 +619,8 @@ int r100_irq_process(struct radeon_device *rdev) } status = r100_irq_ack(rdev); } + /* reset gui idle ack. the status bit is broken */ + rdev->irq.gui_idle_acked = false; if (queue_hotplug) queue_work(rdev->wq, &rdev->hotplug_work); if (rdev->msi_enabled) { @@ -659,26 +952,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) if (r100_debugfs_cp_init(rdev)) { DRM_ERROR("Failed to register debugfs file for CP !\n"); } - /* Reset CP */ - tmp = RREG32(RADEON_CP_CSQ_STAT); - if ((tmp & (1 << 31))) { - DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp); - WREG32(RADEON_CP_CSQ_MODE, 0); - WREG32(RADEON_CP_CSQ_CNTL, 0); - WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); - tmp = RREG32(RADEON_RBBM_SOFT_RESET); - mdelay(2); - WREG32(RADEON_RBBM_SOFT_RESET, 0); - tmp = RREG32(RADEON_RBBM_SOFT_RESET); - mdelay(2); - tmp = RREG32(RADEON_CP_CSQ_STAT); - if ((tmp & (1 << 31))) { - DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp); - } - } else { - DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); - } - if (!rdev->me_fw) { r = r100_cp_init_microcode(rdev); if (r) { @@ -741,6 +1014,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) udelay(10); rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); + /* protect against crazy HW on resume */ + rdev->cp.wptr &= rdev->cp.ptr_mask; /* Set cp mode to bus mastering & enable cp*/ WREG32(RADEON_CP_CSQ_MODE, REG_SET(RADEON_INDIRECT2_START, indirect2_start) | @@ -781,39 +1056,6 @@ void r100_cp_disable(struct radeon_device *rdev) } } -int r100_cp_reset(struct radeon_device *rdev) -{ - uint32_t tmp; - bool reinit_cp; - int i; - - reinit_cp = rdev->cp.ready; - rdev->cp.ready = false; - WREG32(RADEON_CP_CSQ_MODE, 0); - WREG32(RADEON_CP_CSQ_CNTL, 0); - WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); - (void)RREG32(RADEON_RBBM_SOFT_RESET); - udelay(200); - WREG32(RADEON_RBBM_SOFT_RESET, 0); - /* Wait to prevent race in RBBM_STATUS */ - mdelay(1); - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_RBBM_STATUS); - if (!(tmp & (1 << 16))) { - DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n", - tmp); - if (reinit_cp) { - return r100_cp_init(rdev, rdev->cp.ring_size); - } - return 0; - } - DRM_UDELAY(1); - } - tmp = 
RREG32(RADEON_RBBM_STATUS); - DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp); - return -1; -} - void r100_cp_commit(struct radeon_device *rdev) { WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); @@ -988,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 5); reg = CP_PACKET0_GET_REG(header); - mutex_lock(&p->rdev->ddev->mode_config.mutex); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_ERROR("cannot find crtc %d\n", crtc_id); @@ -1022,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; } out: - mutex_unlock(&p->rdev->ddev->mode_config.mutex); return r; } @@ -1386,6 +1626,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_TXFORMAT_RGB332: case RADEON_TXFORMAT_Y8: track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case RADEON_TXFORMAT_AI88: case RADEON_TXFORMAT_ARGB1555: @@ -1397,12 +1638,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_TXFORMAT_LDUDV655: case RADEON_TXFORMAT_DUDV88: track->textures[i].cpp = 2; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case RADEON_TXFORMAT_ARGB8888: case RADEON_TXFORMAT_RGBA8888: case RADEON_TXFORMAT_SHADOW32: case RADEON_TXFORMAT_LDUDUV8888: track->textures[i].cpp = 4; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case RADEON_TXFORMAT_DXT1: track->textures[i].cpp = 1; @@ -1727,76 +1970,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -void r100_gpu_init(struct radeon_device *rdev) +void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp) { - /* TODO: anythings to do here ? pipes ? */ - r100_hdp_reset(rdev); + lockup->last_cp_rptr = cp->rptr; + lockup->last_jiffies = jiffies; +} + +/** + * r100_gpu_cp_is_lockup() - check if the CP is locked up by tracking its read pointer + * @rdev: radeon device structure + * @lockup: r100_gpu_lockup structure holding CP lockup tracking information + * @cp: radeon_cp structure holding CP information + * + * We don't need to initialize the lockup tracking information: either the CP rptr + * will have moved to a different value or jiffies will have wrapped around, and both + * cases force (re)initialization of the lockup tracking information. + * + * A possible false positive is when we get called after a long while and last_cp_rptr + * happens to equal the current CP rptr; it's unlikely, but it can happen. To avoid this, + * if the elapsed time since the last call is bigger than 3 seconds we return false and + * update the tracking information. Because of this the caller must call + * r100_gpu_cp_is_lockup several times in less than 3 seconds for a lockup to be + * reported; the fencing code should be cautious about that. + * + * The caller should write to the ring to force the CP to do something, so we don't get + * a false positive when the CP simply was given nothing to do.
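+ *
+ * A minimal polling sketch (this mirrors what r100_gpu_is_lockup() below
+ * actually does; the local variable names are illustrative only):
+ *
+ *	r = radeon_ring_lock(rdev, 2);
+ *	if (!r) {
+ *		radeon_ring_write(rdev, 0x80000000);	(two PACKET2 NOPs to force CP activity)
+ *		radeon_ring_write(rdev, 0x80000000);
+ *		radeon_ring_unlock_commit(rdev);
+ *	}
+ *	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ *	lockup = r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);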
+ * + **/ +bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp) +{ + unsigned long cjiffies, elapsed; + + cjiffies = jiffies; + if (!time_after(cjiffies, lockup->last_jiffies)) { + /* likely a wrap around */ + lockup->last_cp_rptr = cp->rptr; + lockup->last_jiffies = jiffies; + return false; + } + if (cp->rptr != lockup->last_cp_rptr) { + /* CP is still working, no lockup */ + lockup->last_cp_rptr = cp->rptr; + lockup->last_jiffies = jiffies; + return false; + } + elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); + if (elapsed >= 3000) { + /* very likely the improbable case where the current + * rptr just happens to equal the rptr we recorded + * a while ago; this is more likely a false positive, + * so update the tracking information, which forces + * us to be called again at a later point + */ + lockup->last_cp_rptr = cp->rptr; + lockup->last_jiffies = jiffies; + return false; + } + if (elapsed >= 1000) { + dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); + return true; + } + /* give a chance to the GPU ... */ + return false; } -void r100_hdp_reset(struct radeon_device *rdev) +bool r100_gpu_is_lockup(struct radeon_device *rdev) { - uint32_t tmp; + u32 rbbm_status; + int r; - tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; - tmp |= (7 << 28); - WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); - (void)RREG32(RADEON_HOST_PATH_CNTL); - udelay(200); - WREG32(RADEON_RBBM_SOFT_RESET, 0); - WREG32(RADEON_HOST_PATH_CNTL, tmp); - (void)RREG32(RADEON_HOST_PATH_CNTL); + rbbm_status = RREG32(R_000E40_RBBM_STATUS); + if (!G_000E40_GUI_ACTIVE(rbbm_status)) { + r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp); + return false; + } + /* force CP activities */ + r = radeon_ring_lock(rdev, 2); + if (!r) { + /* PACKET2 NOP */ + radeon_ring_write(rdev, 0x80000000); + radeon_ring_write(rdev, 0x80000000); + radeon_ring_unlock_commit(rdev); + } + rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); + return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp); } -int r100_rb2d_reset(struct radeon_device *rdev) +void r100_bm_disable(struct radeon_device *rdev) { - uint32_t tmp; - int i; + u32 tmp; - WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); - (void)RREG32(RADEON_RBBM_SOFT_RESET); - udelay(200); - WREG32(RADEON_RBBM_SOFT_RESET, 0); - /* Wait to prevent race in RBBM_STATUS */ + /* disable bus mastering */ + tmp = RREG32(R_000030_BUS_CNTL); + WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); + mdelay(1); + WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); + mdelay(1); + WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); + tmp = RREG32(RADEON_BUS_CNTL); + mdelay(1); + pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); + pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); mdelay(1); - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_RBBM_STATUS); - if (!(tmp & (1 << 26))) { - DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n", - tmp); - return 0; - } - DRM_UDELAY(1); - } - tmp = RREG32(RADEON_RBBM_STATUS); - DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp); - return -1; } -int r100_gpu_reset(struct radeon_device *rdev) +int r100_asic_reset(struct radeon_device *rdev) { - uint32_t status; + struct r100_mc_save save; + u32 status, tmp; - /* reset order likely matter */ - status = RREG32(RADEON_RBBM_STATUS); - /* reset HDP */ - r100_hdp_reset(rdev); - /* reset rb2d */ - if (status & ((1 << 17) | (1 << 18) |
(1 << 27))) { - r100_rb2d_reset(rdev); + r100_mc_stop(rdev, &save); + status = RREG32(R_000E40_RBBM_STATUS); + if (!G_000E40_GUI_ACTIVE(status)) { + return 0; } - /* TODO: reset 3D engine */ + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* stop CP */ + WREG32(RADEON_CP_CSQ_CNTL, 0); + tmp = RREG32(RADEON_CP_RB_CNTL); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); + WREG32(RADEON_CP_RB_RPTR_WR, 0); + WREG32(RADEON_CP_RB_WPTR, 0); + WREG32(RADEON_CP_RB_CNTL, tmp); + /* save PCI state */ + pci_save_state(rdev->pdev); + /* disable bus mastering */ + r100_bm_disable(rdev); + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | + S_0000F0_SOFT_RESET_RE(1) | + S_0000F0_SOFT_RESET_PP(1) | + S_0000F0_SOFT_RESET_RB(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); /* reset CP */ - status = RREG32(RADEON_RBBM_STATUS); - if (status & (1 << 16)) { - r100_cp_reset(rdev); - } + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* restore PCI & busmastering */ + pci_restore_state(rdev->pdev); + r100_enable_bm(rdev); /* Check if GPU is idle */ - status = RREG32(RADEON_RBBM_STATUS); - if (status & RADEON_RBBM_ACTIVE) { - DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); + if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || + G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { + dev_err(rdev->dev, "failed to reset GPU\n"); + rdev->gpu_lockup = true; return -1; } - DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); + r100_mc_resume(rdev, &save); + dev_info(rdev->dev, "GPU reset succeed\n"); return 0; } @@ -1804,6 +2134,7 @@ void r100_set_common_regs(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; bool force_dac2 = false; + u32 tmp; /* set these so they don't interfere with anything */ WREG32(RADEON_OV0_SCALE_CNTL, 0); @@ -1875,6 +2206,12 @@ void r100_set_common_regs(struct radeon_device *rdev) WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); WREG32(RADEON_DAC_CNTL2, dac2_cntl); } + + /* switch PM block to ACPI mode */ + tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); + tmp &= ~RADEON_PM_MODE_SEL; + WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); + } /* @@ -1989,11 +2326,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev) else rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } - /* FIXME remove this once we support unmappable VRAM */ - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { - rdev->mc.mc_vram_size = rdev->mc.aper_size; - rdev->mc.real_vram_size = rdev->mc.aper_size; - } } void r100_vga_set_state(struct radeon_device *rdev, bool state) @@ -2020,8 +2352,10 @@ void r100_mc_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = 0; if (!(rdev->flags & RADEON_IS_AGP)) radeon_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); } @@ -2272,12 +2606,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, int surf_index = reg * 16; int flags = 0; - /* r100/r200 divide by 16 */ - if (rdev->family 
< CHIP_R300) - flags = pitch / 16; - else - flags = pitch / 8; - if (rdev->family <= CHIP_RS200) { if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) @@ -2301,6 +2629,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, if (tiling_flags & RADEON_TILING_SWAP_32BIT) flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; + /* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */ + if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { + if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) + if (ASIC_IS_RN50(rdev)) + pitch /= 16; + } + + /* r100/r200 divide by 16 */ + if (rdev->family < CHIP_R300) + flags |= pitch / 16; + else + flags |= pitch / 8; + + DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); WREG32(RADEON_SURFACE0_INFO + surf_index, flags); WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); @@ -2321,53 +2663,53 @@ void r100_bandwidth_update(struct radeon_device *rdev) fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; uint32_t temp, data, mem_trcd, mem_trp, mem_tras; fixed20_12 memtcas_ff[8] = { - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(0), - fixed_init_half(1), - fixed_init_half(2), - fixed_init(0), + dfixed_init(1), + dfixed_init(2), + dfixed_init(3), + dfixed_init(0), + dfixed_init_half(1), + dfixed_init_half(2), + dfixed_init(0), }; fixed20_12 memtcas_rs480_ff[8] = { - fixed_init(0), - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(0), - fixed_init_half(1), - fixed_init_half(2), - fixed_init_half(3), + dfixed_init(0), + dfixed_init(1), + dfixed_init(2), + dfixed_init(3), + dfixed_init(0), + dfixed_init_half(1), + dfixed_init_half(2), + dfixed_init_half(3), }; fixed20_12 memtcas2_ff[8] = { - fixed_init(0), - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(4), - fixed_init(5), - fixed_init(6), - fixed_init(7), + dfixed_init(0), + dfixed_init(1), + dfixed_init(2), + dfixed_init(3), + dfixed_init(4), + dfixed_init(5), + dfixed_init(6), + dfixed_init(7), }; fixed20_12 memtrbs[8] = { - fixed_init(1), - fixed_init_half(1), - fixed_init(2), - fixed_init_half(2), - fixed_init(3), - fixed_init_half(3), - fixed_init(4), - fixed_init_half(4) + dfixed_init(1), + dfixed_init_half(1), + dfixed_init(2), + dfixed_init_half(2), + dfixed_init(3), + dfixed_init_half(3), + dfixed_init(4), + dfixed_init_half(4) }; fixed20_12 memtrbs_r4xx[8] = { - fixed_init(4), - fixed_init(5), - fixed_init(6), - fixed_init(7), - fixed_init(8), - fixed_init(9), - fixed_init(10), - fixed_init(11) + dfixed_init(4), + dfixed_init(5), + dfixed_init(6), + dfixed_init(7), + dfixed_init(8), + dfixed_init(9), + dfixed_init(10), + dfixed_init(11) }; fixed20_12 min_mem_eff; fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; @@ -2385,6 +2727,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) uint32_t pixel_bytes1 = 0; uint32_t pixel_bytes2 = 0; + radeon_update_display_priority(rdev); + if (rdev->mode_info.crtcs[0]->base.enabled) { mode1 = &rdev->mode_info.crtcs[0]->base.mode; pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; @@ -2396,7 +2740,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) } } - min_mem_eff.full = rfixed_const_8(0); + min_mem_eff.full = dfixed_const_8(0); /* get modes */ if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { uint32_t mc_init_misc_lat_timer =
RREG32(R300_MC_INIT_MISC_LAT_TIMER); @@ -2413,35 +2757,32 @@ void r100_bandwidth_update(struct radeon_device *rdev) /* * determine if there is enough bw for the current mode */ - mclk_ff.full = rfixed_const(rdev->clock.default_mclk); - temp_ff.full = rfixed_const(100); - mclk_ff.full = rfixed_div(mclk_ff, temp_ff); - sclk_ff.full = rfixed_const(rdev->clock.default_sclk); - sclk_ff.full = rfixed_div(sclk_ff, temp_ff); + sclk_ff = rdev->pm.sclk; + mclk_ff = rdev->pm.mclk; temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); - temp_ff.full = rfixed_const(temp); - mem_bw.full = rfixed_mul(mclk_ff, temp_ff); + temp_ff.full = dfixed_const(temp); + mem_bw.full = dfixed_mul(mclk_ff, temp_ff); pix_clk.full = 0; pix_clk2.full = 0; peak_disp_bw.full = 0; if (mode1) { - temp_ff.full = rfixed_const(1000); - pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ - pix_clk.full = rfixed_div(pix_clk, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes1); - peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); + temp_ff.full = dfixed_const(1000); + pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ + pix_clk.full = dfixed_div(pix_clk, temp_ff); + temp_ff.full = dfixed_const(pixel_bytes1); + peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); } if (mode2) { - temp_ff.full = rfixed_const(1000); - pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ - pix_clk2.full = rfixed_div(pix_clk2, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes2); - peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); + temp_ff.full = dfixed_const(1000); + pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ + pix_clk2.full = dfixed_div(pix_clk2, temp_ff); + temp_ff.full = dfixed_const(pixel_bytes2); + peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); } - mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); + mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); if (peak_disp_bw.full >= mem_bw.full) { DRM_ERROR("You may not have enough display bandwidth for current mode\n" "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); @@ -2483,9 +2824,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) mem_tras = ((temp >> 12) & 0xf) + 4; } /* convert to FF */ - trcd_ff.full = rfixed_const(mem_trcd); - trp_ff.full = rfixed_const(mem_trp); - tras_ff.full = rfixed_const(mem_tras); + trcd_ff.full = dfixed_const(mem_trcd); + trp_ff.full = dfixed_const(mem_trp); + tras_ff.full = dfixed_const(mem_tras); /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); @@ -2503,7 +2844,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) /* extra cas latency stored in bits 23-25 0-4 clocks */ data = (temp >> 23) & 0x7; if (data < 5) - tcas_ff.full += rfixed_const(data); + tcas_ff.full += dfixed_const(data); } if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { @@ -2540,72 +2881,72 @@ void r100_bandwidth_update(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_AGP) { fixed20_12 agpmode_ff; - agpmode_ff.full = rfixed_const(radeon_agpmode); - temp_ff.full = rfixed_const_666(16); - sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); + agpmode_ff.full = dfixed_const(radeon_agpmode); + temp_ff.full = dfixed_const_666(16); + sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); } /* TODO PCIE lanes may affect this - agpmode == 16??
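(for reference, the AGP branch above deducts agpmode * 16.666 from the effective sclk: dfixed_const_666(16) encodes 16 and two thirds in 20.12 fixed point, so agpmode 4 costs roughly 66.7. A PCIe equivalent would presumably need a similar deduction keyed off the lane count, hence this TODO.)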
*/ if (ASIC_IS_R300(rdev)) { - sclk_delay_ff.full = rfixed_const(250); + sclk_delay_ff.full = dfixed_const(250); } else { if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { if (rdev->mc.vram_is_ddr) - sclk_delay_ff.full = rfixed_const(41); + sclk_delay_ff.full = dfixed_const(41); else - sclk_delay_ff.full = rfixed_const(33); + sclk_delay_ff.full = dfixed_const(33); } else { if (rdev->mc.vram_width == 128) - sclk_delay_ff.full = rfixed_const(57); + sclk_delay_ff.full = dfixed_const(57); else - sclk_delay_ff.full = rfixed_const(41); + sclk_delay_ff.full = dfixed_const(41); } } - mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); + mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); if (rdev->mc.vram_is_ddr) { if (rdev->mc.vram_width == 32) { - k1.full = rfixed_const(40); + k1.full = dfixed_const(40); c = 3; } else { - k1.full = rfixed_const(20); + k1.full = dfixed_const(20); c = 1; } } else { - k1.full = rfixed_const(40); + k1.full = dfixed_const(40); c = 3; } - temp_ff.full = rfixed_const(2); - mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); - temp_ff.full = rfixed_const(c); - mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); - temp_ff.full = rfixed_const(4); - mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); - mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); + temp_ff.full = dfixed_const(2); + mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); + temp_ff.full = dfixed_const(c); + mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); + temp_ff.full = dfixed_const(4); + mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); + mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); mc_latency_mclk.full += k1.full; - mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); - mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); + mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); + mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); /* HW cursor time assuming worst case of full size colour cursor. */ - temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); + temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); temp_ff.full += trcd_ff.full; if (temp_ff.full < tras_ff.full) temp_ff.full = tras_ff.full; - cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); + cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); - temp_ff.full = rfixed_const(cur_size); - cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); + temp_ff.full = dfixed_const(cur_size); + cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); /* Find the total latency for the display data. */ - disp_latency_overhead.full = rfixed_const(8); - disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); + disp_latency_overhead.full = dfixed_const(8); + disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; @@ -2633,16 +2974,16 @@ void r100_bandwidth_update(struct radeon_device *rdev) /* Find the drain rate of the display buffer. */ - temp_ff.full = rfixed_const((16/pixel_bytes1)); - disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); + temp_ff.full = dfixed_const((16/pixel_bytes1)); + disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); /* Find the critical point of the display buffer. 
*/ - crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); - crit_point_ff.full += rfixed_const_half(0); + crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); + crit_point_ff.full += dfixed_const_half(0); - critical_point = rfixed_trunc(crit_point_ff); + critical_point = dfixed_trunc(crit_point_ff); if (rdev->disp_priority == 2) { critical_point = 0; @@ -2713,8 +3054,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) /* Find the drain rate of the display buffer. */ - temp_ff.full = rfixed_const((16/pixel_bytes2)); - disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); + temp_ff.full = dfixed_const((16/pixel_bytes2)); + disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); @@ -2735,8 +3076,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) critical_point2 = 0; else { temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; - temp_ff.full = rfixed_const(temp); - temp_ff.full = rfixed_mul(mclk_ff, temp_ff); + temp_ff.full = dfixed_const(temp); + temp_ff.full = dfixed_mul(mclk_ff, temp_ff); if (sclk_ff.full < temp_ff.full) temp_ff.full = sclk_ff.full; @@ -2744,15 +3085,15 @@ void r100_bandwidth_update(struct radeon_device *rdev) if (mode1) { temp_ff.full = read_return_rate.full - disp_drain_rate.full; - time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); + time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); } else { time_disp1_drop_priority.full = 0; } crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; - crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); - crit_point_ff.full += rfixed_const_half(0); + crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); + crit_point_ff.full += dfixed_const_half(0); - critical_point2 = rfixed_trunc(crit_point_ff); + critical_point2 = dfixed_trunc(crit_point_ff); if (rdev->disp_priority == 2) { critical_point2 = 0; @@ -2816,33 +3157,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) DRM_ERROR("compress format %d\n", t->compress_format); } -static int r100_cs_track_cube(struct radeon_device *rdev, - struct r100_cs_track *track, unsigned idx) -{ - unsigned face, w, h; - struct radeon_bo *cube_robj; - unsigned long size; - - for (face = 0; face < 5; face++) { - cube_robj = track->textures[idx].cube_info[face].robj; - w = track->textures[idx].cube_info[face].width; - h = track->textures[idx].cube_info[face].height; - - size = w * h; - size *= track->textures[idx].cpp; - - size += track->textures[idx].cube_info[face].offset; - - if (size > radeon_bo_size(cube_robj)) { - DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", - size, radeon_bo_size(cube_robj)); - r100_cs_track_texture_print(&track->textures[idx]); - return -1; - } - } - return 0; -} - static int r100_track_compress_size(int compress_format, int w, int h) { int block_width, block_height, block_bytes; @@ -2873,12 +3187,43 @@ static int r100_track_compress_size(int compress_format, int w, int h) return sz; } +static int r100_cs_track_cube(struct radeon_device *rdev, + struct r100_cs_track *track, unsigned idx) +{ + unsigned face, w, h; + struct radeon_bo *cube_robj; + unsigned long size; + unsigned compress_format = track->textures[idx].compress_format; + + for (face = 0; face < 5; face++) { + cube_robj = track->textures[idx].cube_info[face].robj; + w = track->textures[idx].cube_info[face].width; + h = 
track->textures[idx].cube_info[face].height; + + if (compress_format) { + size = r100_track_compress_size(compress_format, w, h); + } else + size = w * h; + size *= track->textures[idx].cpp; + + size += track->textures[idx].cube_info[face].offset; + + if (size > radeon_bo_size(cube_robj)) { + DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", + size, radeon_bo_size(cube_robj)); + r100_cs_track_texture_print(&track->textures[idx]); + return -1; + } + } + return 0; +} + static int r100_cs_track_texture_check(struct radeon_device *rdev, struct r100_cs_track *track) { struct radeon_bo *robj; unsigned long size; - unsigned u, i, w, h; + unsigned u, i, w, h, d; int ret; for (u = 0; u < track->num_texture; u++) { @@ -2910,20 +3255,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, h = h / (1 << i); if (track->textures[u].roundup_h) h = roundup_pow_of_two(h); + if (track->textures[u].tex_coord_type == 1) { + d = (1 << track->textures[u].txdepth) / (1 << i); + if (!d) + d = 1; + } else { + d = 1; + } if (track->textures[u].compress_format) { - size += r100_track_compress_size(track->textures[u].compress_format, w, h); + size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; /* compressed textures are block based */ } else - size += w * h; + size += w * h * d; } size *= track->textures[u].cpp; switch (track->textures[u].tex_coord_type) { case 0: - break; case 1: - size *= (1 << track->textures[u].txdepth); break; case 2: if (track->separate_cube) { @@ -2957,7 +3307,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) for (i = 0; i < track->num_cb; i++) { if (track->cb[i].robj == NULL) { - if (!(track->fastfill || track->color_channel_mask || + if (!(track->zb_cb_clear || track->color_channel_mask || track->blend_read_enable)) { continue; } @@ -2994,7 +3344,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) } } prim_walk = (track->vap_vf_cntl >> 4) & 0x3; - nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; + if (track->vap_vf_cntl & (1 << 14)) { + nverts = track->vap_alt_nverts; + } else { + nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; + } switch (prim_walk) { case 1: for (i = 0; i < track->num_arrays; i++) { @@ -3377,7 +3731,7 @@ static int r100_startup(struct radeon_device *rdev) /* Resume clock */ r100_clock_startup(rdev); /* Initialize GPU configuration (# pipes, ...) */ - r100_gpu_init(rdev); +// r100_gpu_init(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ r100_enable_bm(rdev); @@ -3414,7 +3768,7 @@ int r100_resume(struct radeon_device *rdev) /* Resume clock before doing reset */ r100_clock_startup(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -3482,7 +3836,7 @@ int r100_init(struct radeon_device *rdev) return r; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -3495,8 +3849,6 @@ int r100_init(struct radeon_device *rdev) r100_errata(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index b27a6999d219..f47cdca1c004 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h @@ -64,6 +64,7 @@ struct r100_cs_track { unsigned maxy; unsigned vtx_size; unsigned vap_vf_cntl; + unsigned vap_alt_nverts; unsigned immd_dwords; unsigned num_arrays; unsigned max_indx; @@ -74,7 +75,7 @@ struct r100_cs_track { struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; bool z_enabled; bool separate_cube; - bool fastfill; + bool zb_cb_clear; bool blend_read_enable; }; diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index df29a630c466..d016b16fa116 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h @@ -74,6 +74,134 @@ #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) /* Registers */ +#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 +#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) +#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) +#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE +#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) +#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) +#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD +#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2) +#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1) +#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB +#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) +#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) +#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 +#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) +#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) +#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF +#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) +#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) +#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF +#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) +#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) +#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF +#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) +#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) +#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F +#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) +#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) +#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF +#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) +#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) +#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF +#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) +#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) +#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF +#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) +#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) +#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF +#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) +#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) +#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF +#define R_000030_BUS_CNTL 0x000030 +#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0) +#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1) +#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE +#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1) +#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 
0x1) +#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD +#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2) +#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1) +#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB +#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3) +#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1) +#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7 +#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4) +#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1) +#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF +#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5) +#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1) +#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF +#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6) +#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1) +#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF +#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7) +#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1) +#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F +#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8) +#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1) +#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF +#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9) +#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1) +#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF +#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10) +#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1) +#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF +#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11) +#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1) +#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF +#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12) +#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1) +#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF +#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13) +#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1) +#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF +#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14) +#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1) +#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF +#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15) +#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1) +#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF +#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16) +#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF) +#define C_000030_BUS_RETRY_WS 0xFFF0FFFF +#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20) +#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1) +#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF +#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21) +#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1) +#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF +#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22) +#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1) +#define C_000030_BUS_SUSPEND 0xFFBFFFFF +#define S_000030_LAT_16X(x) (((x) & 0x1) << 23) +#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1) +#define C_000030_LAT_16X 0xFF7FFFFF +#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24) +#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1) +#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF +#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25) +#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1) +#define C_000030_ENFRCWRDY 0xFDFFFFFF +#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26) +#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) 
& 0x1) +#define C_000030_BUS_MSTR_WS 0xFBFFFFFF +#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27) +#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1) +#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF +#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28) +#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1) +#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF +#define S_000030_SERR_EN(x) (((x) & 0x1) << 29) +#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1) +#define C_000030_SERR_EN 0xDFFFFFFF +#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30) +#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1) +#define C_000030_BUS_READ_BURST 0xBFFFFFFF +#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31) +#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1) +#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF #define R_000040_GEN_INT_CNTL 0x000040 #define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0) #define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1) @@ -710,5 +838,41 @@ #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) #define C_00000D_FORCE_RB 0xEFFFFFFF +/* PLL regs */ +#define SCLK_CNTL 0xd +#define FORCE_HDP (1 << 17) +#define CLK_PWRMGT_CNTL 0x14 +#define GLOBAL_PMAN_EN (1 << 10) +#define DISP_PM (1 << 20) +#define PLL_PWRMGT_CNTL 0x15 +#define MPLL_TURNOFF (1 << 0) +#define SPLL_TURNOFF (1 << 1) +#define PPLL_TURNOFF (1 << 2) +#define P2PLL_TURNOFF (1 << 3) +#define TVPLL_TURNOFF (1 << 4) +#define MOBILE_SU (1 << 16) +#define SU_SCLK_USE_BCLK (1 << 17) +#define SCLK_CNTL2 0x1e +#define REDUCED_SPEED_SCLK_MODE (1 << 16) +#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17) +#define MCLK_MISC 0x1f +#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18) +#define SCLK_MORE_CNTL 0x35 +#define REDUCED_SPEED_SCLK_EN (1 << 16) +#define IO_CG_VOLTAGE_DROP (1 << 17) +#define VOLTAGE_DELAY_SEL(x) ((x) << 20) +#define VOLTAGE_DROP_SYNC (1 << 19) + +/* mmreg */ +#define DISP_PWR_MAN 0xd08 +#define DISP_D3_GRPH_RST (1 << 18) +#define DISP_D3_SUBPIC_RST (1 << 19) +#define DISP_D3_OV0_RST (1 << 20) +#define DISP_D1D2_GRPH_RST (1 << 21) +#define DISP_D1D2_SUBPIC_RST (1 << 22) +#define DISP_D1D2_OV0_RST (1 << 23) +#define DISP_DVO_ENABLE_RST (1 << 24) +#define TV_ENABLE_RST (1 << 25) +#define AUTO_PWRUP_EN (1 << 26) #endif diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 1146c9909c2c..0266d72e0a4c 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -30,6 +30,7 @@ #include "radeon_drm.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_asic.h" #include "r100d.h" #include "r200_reg_safe.h" @@ -414,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, /* 2D, 3D, CUBE */ switch (tmp) { case 0: + case 3: + case 4: case 5: case 6: case 7: @@ -449,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_RGB332: case R200_TXFORMAT_Y8: track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_AI88: case R200_TXFORMAT_ARGB1555: @@ -460,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_DVDU88: case R200_TXFORMAT_AVYU4444: track->textures[i].cpp = 2; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R200_TXFORMAT_ARGB8888: case R200_TXFORMAT_RGBA8888: @@ -467,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_BGR111110: case R200_TXFORMAT_LDVDU8888: track->textures[i].cpp = 4; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case 
R200_TXFORMAT_DXT1: track->textures[i].cpp = 1; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 4cef90cd74e5..19a7ef7ee344 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -26,10 +26,13 @@ * Jerome Glisse */ #include <linux/seq_file.h> -#include "drmP.h" -#include "drm.h" +#include <linux/slab.h> +#include <drm/drmP.h> +#include <drm/drm.h> +#include <drm/drm_crtc_helper.h> #include "radeon_reg.h" #include "radeon.h" +#include "radeon_asic.h" #include "radeon_drm.h" #include "r100_track.h" #include "r300d.h" @@ -149,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) u32 tmp; int r; + WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); + WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); + WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); + WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); @@ -164,9 +171,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) void rv370_pcie_gart_fini(struct radeon_device *rdev) { + radeon_gart_fini(rdev); rv370_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); } void r300_fence_ring_emit(struct radeon_device *rdev, @@ -321,13 +328,12 @@ void r300_gpu_init(struct radeon_device *rdev) { uint32_t gb_tile_config, tmp; - r100_hdp_reset(rdev); - /* FIXME: rv380 one pipes ? */ - if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) { + if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || + (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { /* r300,r350 */ rdev->num_gb_pipes = 2; } else { - /* rv350,rv370,rv380 */ + /* rv350,rv370,rv380,r300 AD, r350 AH */ rdev->num_gb_pipes = 1; } rdev->num_z_pipes = 1; @@ -373,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev) rdev->num_gb_pipes, rdev->num_z_pipes); } -int r300_ga_reset(struct radeon_device *rdev) +bool r300_gpu_is_lockup(struct radeon_device *rdev) { - uint32_t tmp; - bool reinit_cp; - int i; + u32 rbbm_status; + int r; - reinit_cp = rdev->cp.ready; - rdev->cp.ready = false; - for (i = 0; i < rdev->usec_timeout; i++) { - WREG32(RADEON_CP_CSQ_MODE, 0); - WREG32(RADEON_CP_CSQ_CNTL, 0); - WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); - (void)RREG32(RADEON_RBBM_SOFT_RESET); - udelay(200); - WREG32(RADEON_RBBM_SOFT_RESET, 0); - /* Wait to prevent race in RBBM_STATUS */ - mdelay(1); - tmp = RREG32(RADEON_RBBM_STATUS); - if (tmp & ((1 << 20) | (1 << 26))) { - DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp); - /* GA still busy soft reset it */ - WREG32(0x429C, 0x200); - WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); - WREG32(R300_RE_SCISSORS_TL, 0); - WREG32(R300_RE_SCISSORS_BR, 0); - WREG32(0x24AC, 0); - } - /* Wait to prevent race in RBBM_STATUS */ - mdelay(1); - tmp = RREG32(RADEON_RBBM_STATUS); - if (!(tmp & ((1 << 20) | (1 << 26)))) { - break; - } + rbbm_status = RREG32(R_000E40_RBBM_STATUS); + if (!G_000E40_GUI_ACTIVE(rbbm_status)) { + r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); + return false; } - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_RBBM_STATUS); - if (!(tmp & ((1 << 20) | (1 << 26)))) { - DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", - tmp); - if (reinit_cp) { - return r100_cp_init(rdev, rdev->cp.ring_size); - } - return 0; - } - DRM_UDELAY(1); + /* force CP activities */ + r = radeon_ring_lock(rdev, 2); + if (!r) { + /* PACKET2 NOP */ + 
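/* [Editor's note] 0x80000000 is a CP type-2 packet header, i.e. a
 * one-dword filler/NOP, so the two ring writes below simply push
 * trivial work through the command processor. The lockup test then
 * samples the ring read pointer: if the CP is alive, rptr advances
 * past these NOPs; if it stays frozen across repeated calls for long
 * enough, r100_gpu_cp_is_lockup() reports a lockup. A rough sketch of
 * that comparison, an assumption based on the usage here (the helper
 * itself is defined elsewhere in this series):
 *
 *	if (cp->rptr != lockup->last_cp_rptr) {
 *		lockup->last_cp_rptr = cp->rptr;   (progress seen)
 *		lockup->last_jiffies = jiffies;
 *		return false;
 *	}
 *	return time_after(jiffies, lockup->last_jiffies + lockup_timeout);
 */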
radeon_ring_write(rdev, 0x80000000); + radeon_ring_write(rdev, 0x80000000); + radeon_ring_unlock_commit(rdev); } - tmp = RREG32(RADEON_RBBM_STATUS); - DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); - return -1; + rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); + return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); } -int r300_gpu_reset(struct radeon_device *rdev) +int r300_asic_reset(struct radeon_device *rdev) { - uint32_t status; - - /* reset order likely matter */ - status = RREG32(RADEON_RBBM_STATUS); - /* reset HDP */ - r100_hdp_reset(rdev); - /* reset rb2d */ - if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { - r100_rb2d_reset(rdev); - } - /* reset GA */ - if (status & ((1 << 20) | (1 << 26))) { - r300_ga_reset(rdev); - } - /* reset CP */ - status = RREG32(RADEON_RBBM_STATUS); - if (status & (1 << 16)) { - r100_cp_reset(rdev); + struct r100_mc_save save; + u32 status, tmp; + + r100_mc_stop(rdev, &save); + status = RREG32(R_000E40_RBBM_STATUS); + if (!G_000E40_GUI_ACTIVE(status)) { + return 0; } + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* stop CP */ + WREG32(RADEON_CP_CSQ_CNTL, 0); + tmp = RREG32(RADEON_CP_RB_CNTL); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); + WREG32(RADEON_CP_RB_RPTR_WR, 0); + WREG32(RADEON_CP_RB_WPTR, 0); + WREG32(RADEON_CP_RB_CNTL, tmp); + /* save PCI state */ + pci_save_state(rdev->pdev); + /* disable bus mastering */ + r100_bm_disable(rdev); + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | + S_0000F0_SOFT_RESET_GA(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* resetting the CP seems to be problematic, sometimes it ends up + * hard locking the computer, but it's necessary for a successful + * reset. More testing & playing is needed on R3XX/R4XX to find a + * reliable solution (if any) + */ + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* restore PCI & busmastering */ + pci_restore_state(rdev->pdev); + r100_enable_bm(rdev); /* Check if GPU is idle */ - status = RREG32(RADEON_RBBM_STATUS); - if (status & RADEON_RBBM_ACTIVE) { - DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); + if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { + dev_err(rdev->dev, "failed to reset GPU\n"); + rdev->gpu_lockup = true; return -1; } - DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); + r100_mc_resume(rdev, &save); + dev_info(rdev->dev, "GPU reset succeed\n"); return 0; } - /* * r300,r350,rv350,rv380 VRAM info */ @@ -479,8 +481,10 @@ void r300_mc_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = 0; if (!(rdev->flags & RADEON_IS_AGP)) radeon_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); } void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) @@ -726,6 +730,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* VAP_VF_MAX_VTX_INDX */ track->max_indx = idx_value & 0x00FFFFFFUL; break; + case 0x2088: + /*
VAP_ALT_NUM_VERTICES - only valid on r500 */ + if (p->rdev->family < CHIP_RV515) + goto fail; + track->vap_alt_nverts = idx_value & 0xFFFFFF; + break; case 0x43E4: /* SC_SCISSOR1 */ track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; @@ -763,7 +773,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p, tmp = idx_value & ~(0x7 << 16); tmp |= tile_flags; ib[idx] = tmp; - i = (reg - 0x4E38) >> 2; track->cb[i].pitch = idx_value & 0x3FFE; switch (((idx_value >> 21) & 0xF)) { @@ -873,6 +882,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_Y4X4: case R300_TX_FORMAT_Z3Y3X2: track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_X16: case R300_TX_FORMAT_Y8X8: @@ -884,6 +894,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_B8G8_B8G8: case R300_TX_FORMAT_G8R8_G8B8: track->textures[i].cpp = 2; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_Y16X16: case R300_TX_FORMAT_Z11Y11X10: @@ -894,14 +905,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_FL_I32: case 0x1e: track->textures[i].cpp = 4; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_W16Z16Y16X16: case R300_TX_FORMAT_FL_R16G16B16A16: case R300_TX_FORMAT_FL_I32A32: track->textures[i].cpp = 8; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_FL_R32G32B32A32: track->textures[i].cpp = 16; + track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_DXT1: track->textures[i].cpp = 1; @@ -1036,7 +1050,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 0x4d1c: /* ZB_BW_CNTL */ - track->fastfill = !!(idx_value & (1 << 2)); + track->zb_cb_clear = !!(idx_value & (1 << 5)); break; case 0x4e04: /* RB3D_BLENDCNTL */ @@ -1048,11 +1062,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; /* fallthrough do not move */ default: - printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", - reg, idx); - return -EINVAL; + goto fail; } return 0; +fail: + printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", + reg, idx); + return -EINVAL; } static int r300_packet3_check(struct radeon_cs_parser *p, @@ -1161,6 +1177,8 @@ int r300_cs_parse(struct radeon_cs_parser *p) int r; track = kzalloc(sizeof(*track), GFP_KERNEL); + if (track == NULL) + return -ENOMEM; r100_cs_track_clear(p->rdev, track); p->track = track; do { @@ -1306,7 +1324,7 @@ int r300_resume(struct radeon_device *rdev) /* Resume clock before doing reset */ r300_clock_startup(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -1376,7 +1394,7 @@ int r300_init(struct radeon_device *rdev) return r; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -1389,8 +1407,6 @@ int r300_init(struct radeon_device *rdev) r300_errata(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index ea46d558e8f3..c5c2742e4140 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, ptr_addr = drm_buffer_read_object(cmdbuf->buffer, sizeof(stack_ptr_addr), &stack_ptr_addr); - ref_age_base = (u32 *)(unsigned long)*ptr_addr; + ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr); for (i=0; i < header.scratch.n_bufs; i++) { buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index 4c73114f0de9..968a33317fbf 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h @@ -209,7 +209,52 @@ #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) #define C_000E40_GUI_ACTIVE 0x7FFFFFFF - +#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 +#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) +#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) +#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE +#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) +#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) +#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD +#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) +#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) +#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB +#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) +#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) +#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 +#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) +#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) +#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF +#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) +#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) +#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF +#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) +#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) +#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF +#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) +#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) +#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F +#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) +#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) +#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF +#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) +#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) +#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF +#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) +#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) +#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF +#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) +#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) +#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF +#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) +#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) +#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF +#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) +#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) +#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF +#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) +#define 
G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) +#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF #define R_00000D_SCLK_CNTL 0x00000D #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index c7593b8f58ee..e6c89142bb4d 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -26,14 +26,55 @@ * Jerome Glisse */ #include <linux/seq_file.h> +#include <linux/slab.h> #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_asic.h" #include "atom.h" #include "r100d.h" #include "r420d.h" #include "r420_reg_safe.h" +void r420_pm_init_profile(struct radeon_device *rdev) +{ + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + /* low sh */ + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; + /* low mh */ + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; +} + static void r420_set_reg_safe(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; @@ -57,6 +98,12 @@ void r420_pipes_init(struct radeon_device *rdev) /* get max number of pipes */ gb_pipe_select = RREG32(0x402C); num_pipes = ((gb_pipe_select >> 12) & 3) + 1; + + /* SE chips have 1 pipe */ + if ((rdev->pdev->device == 0x5e4c) || + (rdev->pdev->device == 0x5e4f)) + num_pipes = 1; + rdev->num_gb_pipes = num_pipes; tmp = 0; switch (num_pipes) { @@ -233,7 +280,7 @@ int r420_resume(struct radeon_device *rdev) /* Resume clock before doing reset */ r420_clock_resume(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU 
reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -313,7 +360,7 @@ int r420_init(struct radeon_device *rdev) } } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -325,8 +372,6 @@ int r420_init(struct radeon_device *rdev) /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 0cf2ad2a5585..93c9a2bbccf8 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -347,9 +347,11 @@ #define AVIVO_D1CRTC_CONTROL 0x6080 # define AVIVO_CRTC_EN (1 << 0) +# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c +#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 @@ -488,6 +490,7 @@ #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c +#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0 #define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 2b8a5dd13516..694af7cc23ac 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -27,6 +27,7 @@ */ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "atom.h" #include "r520d.h" @@ -52,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev) { unsigned pipe_select_current, gb_pipe_select, tmp; - r100_hdp_reset(rdev); rv515_vga_render_disable(rdev); /* * DST_PIPE_CONFIG 0x170C @@ -121,19 +121,14 @@ static void r520_vram_get_type(struct radeon_device *rdev) void r520_mc_init(struct radeon_device *rdev) { - fixed20_12 a; r520_vram_get_type(rdev); r100_vram_init_sizes(rdev); radeon_vram_location(rdev, &rdev->mc, 0); + rdev->mc.gtt_base_align = 0; if (!(rdev->flags & RADEON_IS_AGP)) radeon_gtt_location(rdev, &rdev->mc); - /* FIXME: we should enforce default clock in case GPU is not in - * default setup - */ - a.full = rfixed_const(100); - rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); - rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); + radeon_update_bandwidth_info(rdev); } void r520_mc_program(struct radeon_device *rdev) @@ -214,7 +209,7 @@ int r520_resume(struct radeon_device *rdev) /* Resume clock before doing reset */ rv515_clock_startup(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -251,7 +246,7 @@ int r520_init(struct radeon_device *rdev) return -EINVAL; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -267,8 +262,6 @@ int r520_init(struct radeon_device *rdev) } /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c52290197292..e100f69faeec 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -25,12 +25,14 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/slab.h> #include <linux/seq_file.h> #include <linux/firmware.h> #include <linux/platform_device.h> #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" +#include "radeon_asic.h" #include "radeon_mode.h" #include "r600d.h" #include "atom.h" @@ -42,6 +44,9 @@ #define R700_PFP_UCODE_SIZE 848 #define R700_PM4_UCODE_SIZE 1360 #define R700_RLC_UCODE_SIZE 1024 +#define EVERGREEN_PFP_UCODE_SIZE 1120 +#define EVERGREEN_PM4_UCODE_SIZE 1376 +#define EVERGREEN_RLC_UCODE_SIZE 768 /* Firmware Names */ MODULE_FIRMWARE("radeon/R600_pfp.bin"); @@ -66,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin"); MODULE_FIRMWARE("radeon/RV710_me.bin"); MODULE_FIRMWARE("radeon/R600_rlc.bin"); MODULE_FIRMWARE("radeon/R700_rlc.bin"); +MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); +MODULE_FIRMWARE("radeon/CEDAR_me.bin"); +MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); +MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); +MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); +MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); +MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); +MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); +MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); +MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); +MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); +MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); int r600_debugfs_mc_info_init(struct radeon_device *rdev); @@ -73,6 +90,499 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev); int r600_mc_wait_for_idle(struct radeon_device *rdev); void r600_gpu_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); +void r600_irq_disable(struct radeon_device *rdev); + +void r600_pm_get_dynpm_state(struct radeon_device *rdev) +{ + int i; + + rdev->pm.dynpm_can_upclock = true; + rdev->pm.dynpm_can_downclock = true; + + /* power state array is low to high, default is first */ + if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) { + int min_power_state_index = 0; + + if (rdev->pm.num_power_states > 2) + min_power_state_index = 1; + + switch (rdev->pm.dynpm_planned_action) { + case DYNPM_ACTION_MINIMUM: + rdev->pm.requested_power_state_index = min_power_state_index; + rdev->pm.requested_clock_mode_index = 0; + rdev->pm.dynpm_can_downclock = false; + break; + case DYNPM_ACTION_DOWNCLOCK: + if (rdev->pm.current_power_state_index == min_power_state_index) { + rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; + rdev->pm.dynpm_can_downclock = false; + } else { + if (rdev->pm.active_crtc_count > 1) { + for (i = 0; i < rdev->pm.num_power_states; i++) { + if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) + continue; + else if (i >= rdev->pm.current_power_state_index) { + rdev->pm.requested_power_state_index = + rdev->pm.current_power_state_index; + break; + } else { + rdev->pm.requested_power_state_index = i; + break; + } + } + } else { + if (rdev->pm.current_power_state_index == 0) + rdev->pm.requested_power_state_index = + rdev->pm.num_power_states - 1; + else + 
rdev->pm.requested_power_state_index = + rdev->pm.current_power_state_index - 1; + } + } + rdev->pm.requested_clock_mode_index = 0; + /* don't use the power state if crtcs are active and no display flag is set */ + if ((rdev->pm.active_crtc_count > 0) && + (rdev->pm.power_state[rdev->pm.requested_power_state_index]. + clock_info[rdev->pm.requested_clock_mode_index].flags & + RADEON_PM_MODE_NO_DISPLAY)) { + rdev->pm.requested_power_state_index++; + } + break; + case DYNPM_ACTION_UPCLOCK: + if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { + rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; + rdev->pm.dynpm_can_upclock = false; + } else { + if (rdev->pm.active_crtc_count > 1) { + for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { + if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) + continue; + else if (i <= rdev->pm.current_power_state_index) { + rdev->pm.requested_power_state_index = + rdev->pm.current_power_state_index; + break; + } else { + rdev->pm.requested_power_state_index = i; + break; + } + } + } else + rdev->pm.requested_power_state_index = + rdev->pm.current_power_state_index + 1; + } + rdev->pm.requested_clock_mode_index = 0; + break; + case DYNPM_ACTION_DEFAULT: + rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.requested_clock_mode_index = 0; + rdev->pm.dynpm_can_upclock = false; + break; + case DYNPM_ACTION_NONE: + default: + DRM_ERROR("Requested mode for not defined action\n"); + return; + } + } else { + /* XXX select a power state based on AC/DC, single/dualhead, etc. */ + /* for now just select the first power state and switch between clock modes */ + /* power state array is low to high, default is first (0) */ + if (rdev->pm.active_crtc_count > 1) { + rdev->pm.requested_power_state_index = -1; + /* start at 1 as we don't want the default mode */ + for (i = 1; i < rdev->pm.num_power_states; i++) { + if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) + continue; + else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) || + (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) { + rdev->pm.requested_power_state_index = i; + break; + } + } + /* if nothing selected, grab the default state. */ + if (rdev->pm.requested_power_state_index == -1) + rdev->pm.requested_power_state_index = 0; + } else + rdev->pm.requested_power_state_index = 1; + + switch (rdev->pm.dynpm_planned_action) { + case DYNPM_ACTION_MINIMUM: + rdev->pm.requested_clock_mode_index = 0; + rdev->pm.dynpm_can_downclock = false; + break; + case DYNPM_ACTION_DOWNCLOCK: + if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { + if (rdev->pm.current_clock_mode_index == 0) { + rdev->pm.requested_clock_mode_index = 0; + rdev->pm.dynpm_can_downclock = false; + } else + rdev->pm.requested_clock_mode_index = + rdev->pm.current_clock_mode_index - 1; + } else { + rdev->pm.requested_clock_mode_index = 0; + rdev->pm.dynpm_can_downclock = false; + } + /* don't use the power state if crtcs are active and no display flag is set */ + if ((rdev->pm.active_crtc_count > 0) && + (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
+ clock_info[rdev->pm.requested_clock_mode_index].flags & + RADEON_PM_MODE_NO_DISPLAY)) { + rdev->pm.requested_clock_mode_index++; + } + break; + case DYNPM_ACTION_UPCLOCK: + if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { + if (rdev->pm.current_clock_mode_index == + (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) { + rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index; + rdev->pm.dynpm_can_upclock = false; + } else + rdev->pm.requested_clock_mode_index = + rdev->pm.current_clock_mode_index + 1; + } else { + rdev->pm.requested_clock_mode_index = + rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1; + rdev->pm.dynpm_can_upclock = false; + } + break; + case DYNPM_ACTION_DEFAULT: + rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.requested_clock_mode_index = 0; + rdev->pm.dynpm_can_upclock = false; + break; + case DYNPM_ACTION_NONE: + default: + DRM_ERROR("Requested mode for not defined action\n"); + return; + } + } + + DRM_DEBUG("Requested: e: %d m: %d p: %d\n", + rdev->pm.power_state[rdev->pm.requested_power_state_index]. + clock_info[rdev->pm.requested_clock_mode_index].sclk, + rdev->pm.power_state[rdev->pm.requested_power_state_index]. + clock_info[rdev->pm.requested_clock_mode_index].mclk, + rdev->pm.power_state[rdev->pm.requested_power_state_index]. + pcie_lanes); +} + +static int r600_pm_get_type_index(struct radeon_device *rdev, + enum radeon_pm_state_type ps_type, + int instance) +{ + int i; + int found_instance = -1; + + for (i = 0; i < rdev->pm.num_power_states; i++) { + if (rdev->pm.power_state[i].type == ps_type) { + found_instance++; + if (found_instance == instance) + return i; + } + } + /* return default if no match */ + return rdev->pm.default_power_state_index; +} + +void rs780_pm_init_profile(struct radeon_device *rdev) +{ + if (rdev->pm.num_power_states == 2) { + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + /* low sh */ + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; + /* low mh */ + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; + 
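/* [Editor's note] each pm profile entry pairs a power-state index
 * (*_ps_idx) with a clock-mode index (*_cm_idx), once for the
 * dpms-off case and once for dpms-on; the SH/MH profile variants
 * cover single-head vs. multi-head display configurations. Read
 * profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1 as "single
 * head, displays on, high profile -> power_state[1]". */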
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; + } else if (rdev->pm.num_power_states == 3) { + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + /* low sh */ + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; + /* low mh */ + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; + } else { + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + /* low sh */ + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + 
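/* [Editor's note] rs780_pm_init_profile() branches on
 * rdev->pm.num_power_states (2, 3, or more) because boards expose
 * differently sized power-state tables; since the state array is
 * ordered low to high (see the comment in r600_pm_get_dynpm_state()
 * above), the "high" profiles in each branch pick the faster states
 * and the "low" profiles the slower ones, with the default profile
 * pinned to default_power_state_index. */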
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; + /* low mh */ + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; + } +} + +void r600_pm_init_profile(struct radeon_device *rdev) +{ + if (rdev->family == CHIP_R600) { + /* XXX */ + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + /* low sh */ + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; + /* low mh */ + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; + } else { + if (rdev->pm.num_power_states < 4) { + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + 
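/* [Editor's note] with fewer than four power states the indices can
 * be hard-coded (0..2) as below, while the >= 4 branch further down
 * resolves them through r600_pm_get_type_index(); per its definition
 * above, that helper returns the instance'th state of the requested
 * type and falls back to default_power_state_index when nothing
 * matches, e.g.:
 *
 *	idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
 *	(first BATTERY-type state, or the default index if none)
 */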
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; + /* low sh */ + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + /* mid sh */ + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; + /* low mh */ + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + /* mid mh */ + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; + } else { + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; + /* low sh */ + if (rdev->flags & RADEON_IS_MOBILITY) { + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + } else { + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + } + /* mid sh */ + if (rdev->flags & RADEON_IS_MOBILITY) { + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; + } else { + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev,
POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; + } + /* high sh */ + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; + /* low mh */ + if (rdev->flags & RADEON_IS_MOBILITY) { + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + } else { + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + } + /* mid mh */ + if (rdev->flags & RADEON_IS_MOBILITY) { + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; + } else { + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; + } + /* high mh */ + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = + r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; + } + } +} + +void r600_pm_misc(struct radeon_device *rdev) +{ + int req_ps_idx = rdev->pm.requested_power_state_index; + int req_cm_idx = rdev->pm.requested_clock_mode_index; + struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; + struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; + + if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { + if (voltage->voltage != rdev->pm.current_vddc) { + radeon_atom_set_voltage(rdev, voltage->voltage); + rdev->pm.current_vddc = voltage->voltage; + DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + } + } +} + +bool r600_gui_idle(struct radeon_device *rdev) +{ + if (RREG32(GRBM_STATUS) & GUI_ACTIVE) + return false; + else + return true; +} /* hpd for digital panel detect/disconnect */ bool r600_hpd_sense(struct radeon_device *rdev, enum 
radeon_hpd_id hpd) @@ -491,9 +1001,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) void r600_pcie_gart_fini(struct radeon_device *rdev) { + radeon_gart_fini(rdev); r600_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); } void r600_agp_enable(struct radeon_device *rdev) @@ -592,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev) WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); if (rdev->flags & RADEON_IS_AGP) { WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); @@ -669,13 +1179,13 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) if (rdev->flags & RADEON_IS_IGP) base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, mc); } } int r600_mc_init(struct radeon_device *rdev) { - fixed20_12 a; u32 tmp; int chansize, numchan; @@ -713,20 +1223,13 @@ int r600_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; - /* FIXME remove this once we support unmappable VRAM */ - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { - rdev->mc.mc_vram_size = rdev->mc.aper_size; - rdev->mc.real_vram_size = rdev->mc.aper_size; - } r600_vram_gtt_location(rdev, &rdev->mc); - /* FIXME: we should enforce default clock in case GPU is not in - * default setup - */ - a.full = rfixed_const(100); - rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); - rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); - if (rdev->flags & RADEON_IS_IGP) + + if (rdev->flags & RADEON_IS_IGP) { + rs690_pm_info(rdev); rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + } + radeon_update_bandwidth_info(rdev); return 0; } @@ -753,7 +1256,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); - u32 srbm_reset = 0; u32 tmp; dev_info(rdev->dev, "GPU softreset \n"); @@ -768,7 +1270,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } /* Disable CP parsing/prefetching */ - WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); + WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); /* Check if any of the rendering block is busy and reset it */ if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { @@ -787,72 +1289,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) S_008020_SOFT_RESET_VGT(1); dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); WREG32(R_008020_GRBM_SOFT_RESET, tmp); - (void)RREG32(R_008020_GRBM_SOFT_RESET); - udelay(50); + RREG32(R_008020_GRBM_SOFT_RESET); + mdelay(15); WREG32(R_008020_GRBM_SOFT_RESET, 0); - (void)RREG32(R_008020_GRBM_SOFT_RESET); } /* Reset CP (we always reset CP) */ tmp = S_008020_SOFT_RESET_CP(1); dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); WREG32(R_008020_GRBM_SOFT_RESET, tmp); - (void)RREG32(R_008020_GRBM_SOFT_RESET); - udelay(50); + RREG32(R_008020_GRBM_SOFT_RESET); + mdelay(15); WREG32(R_008020_GRBM_SOFT_RESET, 0); - (void)RREG32(R_008020_GRBM_SOFT_RESET); 
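One recurring detail in the soft-reset hunk above: every WREG32() of a reset bit is followed by a read of the same register before the delay. MMIO writes are posted, so the read-back forces the write out to the chip before the driver starts waiting; the hunk also stretches the settle time from udelay(50) to mdelay(15). A compilable sketch of the idiom with stubbed register I/O follows; the offset, reset bit, and 15 ms wait are placeholders, not real hardware values.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_regs[0x10000];    /* stand-in for the MMIO aperture */

static void wreg32(uint32_t reg, uint32_t v) { fake_regs[reg >> 2] = v; }
static uint32_t rreg32(uint32_t reg) { return fake_regs[reg >> 2]; }

#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP   (1u << 0)

int main(void)
{
    wreg32(GRBM_SOFT_RESET, SOFT_RESET_CP);
    /* on real hardware this read flushes the posted write, so the
     * reset bit is guaranteed to have hit the chip before we wait */
    (void)rreg32(GRBM_SOFT_RESET);
    usleep(15000);                 /* userspace stand-in for mdelay(15) */
    wreg32(GRBM_SOFT_RESET, 0);    /* release the reset */
    printf("reset released, reg=0x%08x\n", rreg32(GRBM_SOFT_RESET));
    return 0;
}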
- /* Reset others GPU block if necessary */ - if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_RLC(1); - if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_GRBM(1); - if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_IH(1); - if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_VMC(1); - if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_MC(1); - if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_MC(1); - if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_MC(1); - if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_MC(1); - if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_MC(1); - if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_RLC(1); - if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_SEM(1); - if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_BIF(1); - dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); - WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); - (void)RREG32(R_000E60_SRBM_SOFT_RESET); - udelay(50); - WREG32(R_000E60_SRBM_SOFT_RESET, 0); - (void)RREG32(R_000E60_SRBM_SOFT_RESET); - WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); - (void)RREG32(R_000E60_SRBM_SOFT_RESET); - udelay(50); - WREG32(R_000E60_SRBM_SOFT_RESET, 0); - (void)RREG32(R_000E60_SRBM_SOFT_RESET); /* Wait a little for things to settle down */ - udelay(50); + mdelay(1); dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", RREG32(R_008010_GRBM_STATUS)); dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", RREG32(R_008014_GRBM_STATUS2)); dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", RREG32(R_000E50_SRBM_STATUS)); - /* After reset we need to reinit the asic as GPU often endup in an - * incoherent state. 
- */ - atom_asic_init(rdev->mode_info.atom_context); rv515_mc_resume(rdev, &save); return 0; } -int r600_gpu_reset(struct radeon_device *rdev) +bool r600_gpu_is_lockup(struct radeon_device *rdev) +{ + u32 srbm_status; + u32 grbm_status; + u32 grbm_status2; + int r; + + srbm_status = RREG32(R_000E50_SRBM_STATUS); + grbm_status = RREG32(R_008010_GRBM_STATUS); + grbm_status2 = RREG32(R_008014_GRBM_STATUS2); + if (!G_008010_GUI_ACTIVE(grbm_status)) { + r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); + return false; + } + /* force CP activities */ + r = radeon_ring_lock(rdev, 2); + if (!r) { + /* PACKET2 NOP */ + radeon_ring_write(rdev, 0x80000000); + radeon_ring_write(rdev, 0x80000000); + radeon_ring_unlock_commit(rdev); + } + rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); + return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); +} + +int r600_asic_reset(struct radeon_device *rdev) { return r600_gpu_soft_reset(rdev); } @@ -1132,6 +1618,7 @@ void r600_gpu_init(struct radeon_device *rdev) /* Setup pipes */ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); + WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); @@ -1469,10 +1956,31 @@ int r600_init_microcode(struct radeon_device *rdev) chip_name = "RV710"; rlc_chip_name = "R700"; break; + case CHIP_CEDAR: + chip_name = "CEDAR"; + rlc_chip_name = "CEDAR"; + break; + case CHIP_REDWOOD: + chip_name = "REDWOOD"; + rlc_chip_name = "REDWOOD"; + break; + case CHIP_JUNIPER: + chip_name = "JUNIPER"; + rlc_chip_name = "JUNIPER"; + break; + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + chip_name = "CYPRESS"; + rlc_chip_name = "CYPRESS"; + break; default: BUG(); } - if (rdev->family >= CHIP_RV770) { + if (rdev->family >= CHIP_CEDAR) { + pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; + me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; + rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; + } else if (rdev->family >= CHIP_RV770) { pfp_req_size = R700_PFP_UCODE_SIZE * 4; me_req_size = R700_PM4_UCODE_SIZE * 4; rlc_req_size = R700_RLC_UCODE_SIZE * 4; @@ -1586,12 +2094,15 @@ int r600_cp_start(struct radeon_device *rdev) } radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); radeon_ring_write(rdev, 0x1); - if (rdev->family < CHIP_RV770) { - radeon_ring_write(rdev, 0x3); - radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1); - } else { + if (rdev->family >= CHIP_CEDAR) { + radeon_ring_write(rdev, 0x0); + radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); + } else if (rdev->family >= CHIP_RV770) { radeon_ring_write(rdev, 0x0); radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); + } else { + radeon_ring_write(rdev, 0x3); + radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1); } radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); radeon_ring_write(rdev, 0); @@ -2053,8 +2564,6 @@ int r600_init(struct radeon_device *rdev) r = radeon_clocks_init(rdev); if (r) return r; - /* Initialize power management */ - radeon_pm_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) @@ -2291,10 +2800,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev) } } -static void r600_rlc_stop(struct radeon_device *rdev) +void r600_rlc_stop(struct radeon_device *rdev) { - if (rdev->family >= CHIP_RV770) { + if ((rdev->family >= CHIP_RV770) && + (rdev->family 
<= CHIP_RV740)) { /* r7xx asics need to soft reset RLC before halting */ WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); RREG32(SRBM_SOFT_RESET); @@ -2331,7 +2841,12 @@ static int r600_rlc_init(struct radeon_device *rdev) WREG32(RLC_UCODE_CNTL, 0); fw_data = (const __be32 *)rdev->rlc_fw->data; - if (rdev->family >= CHIP_RV770) { + if (rdev->family >= CHIP_CEDAR) { + for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } + } else if (rdev->family >= CHIP_RV770) { for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { WREG32(RLC_UCODE_ADDR, i); WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); @@ -2361,7 +2876,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev) rdev->ih.enabled = true; } -static void r600_disable_interrupts(struct radeon_device *rdev) +void r600_disable_interrupts(struct radeon_device *rdev) { u32 ih_rb_cntl = RREG32(IH_RB_CNTL); u32 ih_cntl = RREG32(IH_CNTL); @@ -2398,19 +2913,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) WREG32(DC_HPD4_INT_CONTROL, tmp); if (ASIC_IS_DCE32(rdev)) { tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD5_INT_CONTROL, 0); + WREG32(DC_HPD5_INT_CONTROL, tmp); tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD6_INT_CONTROL, 0); + WREG32(DC_HPD6_INT_CONTROL, tmp); } } else { WREG32(DACA_AUTODETECT_INT_CONTROL, 0); WREG32(DACB_AUTODETECT_INT_CONTROL, 0); tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; - WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0); + WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; - WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0); + WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; - WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0); + WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); } } @@ -2476,7 +2991,10 @@ int r600_irq_init(struct radeon_device *rdev) WREG32(IH_CNTL, ih_cntl); /* force the active interrupt state to all disabled */ - r600_disable_interrupt_state(rdev); + if (rdev->family >= CHIP_CEDAR) + evergreen_disable_interrupt_state(rdev); + else + r600_disable_interrupt_state(rdev); /* enable irqs */ r600_enable_interrupts(rdev); @@ -2486,7 +3004,7 @@ int r600_irq_init(struct radeon_device *rdev) void r600_irq_suspend(struct radeon_device *rdev) { - r600_disable_interrupts(rdev); + r600_irq_disable(rdev); r600_rlc_stop(rdev); } @@ -2501,6 +3019,8 @@ int r600_irq_set(struct radeon_device *rdev) u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; u32 mode_int = 0; u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; + u32 grbm_int_cntl = 0; + u32 hdmi1, hdmi2; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); @@ -2514,7 +3034,9 @@ int r600_irq_set(struct radeon_device *rdev) return 0; } + hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; if (ASIC_IS_DCE3(rdev)) { + hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; @@ -2524,6 +3046,7 @@ int r600_irq_set(struct radeon_device *rdev) hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; } } else { + hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; hpd1 = 
RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; @@ -2565,10 +3088,25 @@ int r600_irq_set(struct radeon_device *rdev) DRM_DEBUG("r600_irq_set: hpd 6\n"); hpd6 |= DC_HPDx_INT_EN; } + if (rdev->irq.hdmi[0]) { + DRM_DEBUG("r600_irq_set: hdmi 1\n"); + hdmi1 |= R600_HDMI_INT_EN; + } + if (rdev->irq.hdmi[1]) { + DRM_DEBUG("r600_irq_set: hdmi 2\n"); + hdmi2 |= R600_HDMI_INT_EN; + } + if (rdev->irq.gui_idle) { + DRM_DEBUG("gui idle\n"); + grbm_int_cntl |= GUI_IDLE_INT_ENABLE; + } WREG32(CP_INT_CNTL, cp_int_cntl); WREG32(DxMODE_INT_MASK, mode_int); + WREG32(GRBM_INT_CNTL, grbm_int_cntl); + WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); if (ASIC_IS_DCE3(rdev)) { + WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2); WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); @@ -2578,6 +3116,7 @@ int r600_irq_set(struct radeon_device *rdev) WREG32(DC_HPD6_INT_CONTROL, hpd6); } } else { + WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2); WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); @@ -2661,6 +3200,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev, WREG32(DC_HPD6_INT_CONTROL, tmp); } } + if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { + WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); + } + if (ASIC_IS_DCE3(rdev)) { + if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { + WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); + } + } else { + if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { + WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); + } + } } void r600_irq_disable(struct radeon_device *rdev) @@ -2714,6 +3265,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) * 19 1 FP Hot plug detection B * 19 2 DAC A auto-detection * 19 3 DAC B auto-detection + * 21 4 HDMI block A + * 21 5 HDMI block B * 176 - CP_INT RB * 177 - CP_INT IB1 * 178 - CP_INT IB2 @@ -2765,6 +3318,7 @@ restart_ih: case 0: /* D1 vblank */ if (disp_int & LB_D1_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); @@ -2786,6 +3340,7 @@ restart_ih: case 0: /* D2 vblank */ if (disp_int & LB_D2_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); @@ -2834,14 +3389,14 @@ restart_ih: break; case 10: if (disp_int_cont2 & DC_HPD5_INTERRUPT) { - disp_int_cont &= ~DC_HPD5_INTERRUPT; + disp_int_cont2 &= ~DC_HPD5_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD5\n"); } break; case 12: if (disp_int_cont2 & DC_HPD6_INTERRUPT) { - disp_int_cont &= ~DC_HPD6_INTERRUPT; + disp_int_cont2 &= ~DC_HPD6_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD6\n"); } @@ -2851,6 +3406,10 @@ restart_ih: break; } break; + case 21: /* HDMI */ + DRM_DEBUG("IH: HDMI: 0x%x\n", src_data); + r600_audio_schedule_polling(rdev); + break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ case 178: /* CP_INT in IB2 */ @@ -2860,6 +3419,11 @@ restart_ih: case 181: /* CP EOP event */ DRM_DEBUG("IH: CP EOP\n"); break; 
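Each enable word in the r600_irq_set() hunk above is rebuilt with the same read-modify-write pattern: read the control register, clear only the enable bit so polarity and other configuration bits survive, then OR the bit back in for the sources the driver actually wants. A standalone sketch of that idiom; the global variable stands in for RREG32/WREG32 and the bit position is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DC_HPDx_INT_EN (1u << 16)

/* fake register: enable bit set plus some unrelated config bits */
static uint32_t hpd1_int_control = DC_HPDx_INT_EN | 0x1234;

int main(void)
{
    bool want_hpd1 = false;

    /* preserve unrelated bits (e.g. polarity), drop only the enable */
    uint32_t hpd1 = hpd1_int_control & ~DC_HPDx_INT_EN;
    if (want_hpd1)
        hpd1 |= DC_HPDx_INT_EN;
    hpd1_int_control = hpd1;    /* stand-in for WREG32() */

    printf("HPD1 control now 0x%08x\n", hpd1_int_control);
    return 0;
}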
+ case 233: /* GUI IDLE */ + DRM_DEBUG("IH: GUI idle\n"); + rdev->pm.gui_idle = true; + wake_up(&rdev->irq.idle_queue); + break; + default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index db928016d034..2b26553c352c 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return rdev->family >= CHIP_R600 + return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR) || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; @@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev) /* * current number of channels */ -static int r600_audio_channels(struct radeon_device *rdev) +int r600_audio_channels(struct radeon_device *rdev) { return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1; } @@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev) /* * current bits per sample */ -static int r600_audio_bits_per_sample(struct radeon_device *rdev) +int r600_audio_bits_per_sample(struct radeon_device *rdev) { uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4; switch (value) { @@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev) /* * current sampling rate in HZ */ -static int r600_audio_rate(struct radeon_device *rdev) +int r600_audio_rate(struct radeon_device *rdev) { uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); uint32_t result; @@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev) /* * iec 60958 status bits */ -static uint8_t r600_audio_status_bits(struct radeon_device *rdev) +uint8_t r600_audio_status_bits(struct radeon_device *rdev) { return RREG32(R600_AUDIO_STATUS_BITS) & 0xff; } @@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev) /* * iec 60958 category code */ -static uint8_t r600_audio_category_code(struct radeon_device *rdev) +uint8_t r600_audio_category_code(struct radeon_device *rdev) { return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff; } /* + * schedule next audio update event + */ +void r600_audio_schedule_polling(struct radeon_device *rdev) +{ + mod_timer(&rdev->audio_timer, + jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); +} + +/* * update all hdmi interfaces with current audio parameters */ static void r600_audio_update_hdmi(unsigned long param) @@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param) uint8_t category_code = r600_audio_category_code(rdev); struct drm_encoder *encoder; - int changes = 0; + int changes = 0, still_going = 0; changes |= channels != rdev->audio_channels; changes |= rate != rdev->audio_rate; @@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param) } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + still_going |= radeon_encoder->audio_polling_active; if (changes || r600_hdmi_buffer_status_changed(encoder)) - r600_hdmi_update_audio_settings( - encoder, channels, - rate, bps, status_bits, - category_code); + r600_hdmi_update_audio_settings(encoder); } - mod_timer(&rdev->audio_timer, - jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); + if (still_going) r600_audio_schedule_polling(rdev); } /* @@ -176,44 +183,34 @@ int r600_audio_init(struct radeon_device *rdev) r600_audio_update_hdmi, (unsigned
long)rdev); - mod_timer(&rdev->audio_timer, jiffies + 1); - return 0; } /* - * determin how the encoders and audio interface is wired together + * enable the polling timer, to check for status changes */ -int r600_audio_tmds_index(struct drm_encoder *encoder) +void r600_audio_enable_polling(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *other; - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - return 0; - - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - /* special case check if an TMDS1 is present */ - list_for_each_entry(other, &dev->mode_config.encoder_list, head) { - if (to_radeon_encoder(other)->encoder_id == - ENCODER_OBJECT_ID_INTERNAL_TMDS1) - return 1; - } - return 0; + DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active); + if (radeon_encoder->audio_polling_active) + return; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - return 1; + radeon_encoder->audio_polling_active = 1; + mod_timer(&rdev->audio_timer, jiffies + 1); +} - default: - DRM_ERROR("Unsupported encoder type 0x%02X\n", - radeon_encoder->encoder_id); - return -1; - } +/* + * disable the polling timer, so we get no more status updates + */ +void r600_audio_disable_polling(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active); + radeon_encoder->audio_polling_active = 0; } /* @@ -224,6 +221,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; int base_rate = 48000; switch (radeon_encoder->encoder_id) { @@ -231,32 +229,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) case ENCODER_OBJECT_ID_INTERNAL_LVTM1: WREG32_P(R600_AUDIO_TIMING, 0, ~0x301); break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); break; - default: DRM_ERROR("Unsupported encoder type 0x%02X\n", radeon_encoder->encoder_id); return; } - switch (r600_audio_tmds_index(encoder)) { + switch (dig->dig_encoder) { case 0: - WREG32(R600_AUDIO_PLL1_MUL, base_rate*50); - WREG32(R600_AUDIO_PLL1_DIV, clock*100); + WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50); + WREG32(R600_AUDIO_PLL1_DIV, clock * 100); WREG32(R600_AUDIO_CLK_SRCSEL, 0); break; case 1: - WREG32(R600_AUDIO_PLL2_MUL, base_rate*50); - WREG32(R600_AUDIO_PLL2_DIV, clock*100); + WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50); + WREG32(R600_AUDIO_PLL2_DIV, clock * 100); WREG32(R600_AUDIO_CLK_SRCSEL, 1); break; + default: + dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n", + radeon_encoder->encoder_id); + return; } } diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index f4fb88ece2bb..ca5c29f70779 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c @@ -538,9 +538,12 @@ int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file 
*file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; + int ret; DRM_DEBUG("\n"); - r600_nomm_get_vb(dev); + ret = r600_nomm_get_vb(dev); + if (ret) + return ret; dev_priv->blit_vb->file_priv = file_priv; diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index f6c6c77db7e0..d13622ae74e9 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev) u32 packet2s[16]; int num_packet2s = 0; + /* don't reinitialize blit */ + if (rdev->r600_blit.shader_obj) + return 0; mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index a112c59f9d82..0271b53fa2dd 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c @@ -1,7 +1,42 @@ +/* + * Copyright 2009 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Alex Deucher <alexander.deucher@amd.com> + */ #include <linux/types.h> #include <linux/kernel.h> +/* + * R6xx+ cards need to use the 3D engine to blit data which requires + * quite a bit of hw state setup. Rather than pull the whole 3D driver + * (which normally generates the 3D state) into the DRM, we opt to use + * statically generated state tables. The register state and shaders + * were hand generated to support blitting functionality. See the 3D + * driver or documentation for descriptions of the registers and + * shader instructions.
+ */ + const u32 r6xx_default_state[] = { 0xc0002400, diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 40416c068d9f..68e6f4349309 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -1548,10 +1548,13 @@ static void r700_gfx_init(struct drm_device *dev, RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); + RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); + RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0); + RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0); num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index cd2c63bce501..144c32d37136 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -45,6 +45,7 @@ struct r600_cs_track { u32 nbanks; u32 npipes; /* value we track */ + u32 sq_config; u32 nsamples; u32 cb_color_base_last[8]; struct radeon_bo *cb_color_bo[8]; @@ -141,6 +142,8 @@ static void r600_cs_track_init(struct r600_cs_track *track) { int i; + /* assume DX9 mode */ + track->sq_config = DX9_CONSTS; for (i = 0; i < 8; i++) { track->cb_color_base_last[i] = 0; track->cb_color_size[i] = 0; @@ -582,7 +585,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); reg = CP_PACKET0_GET_REG(header); - mutex_lock(&p->rdev->ddev->mode_config.mutex); + obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { DRM_ERROR("cannot find crtc %d\n", crtc_id); @@ -617,7 +620,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; } out: - mutex_unlock(&p->rdev->ddev->mode_config.mutex); return r; } @@ -715,6 +717,9 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx tmp =radeon_get_ib_value(p, idx); ib[idx] = 0; break; + case SQ_CONFIG: + track->sq_config = radeon_get_ib_value(p, idx); + break; case R_028800_DB_DEPTH_CONTROL: track->db_depth_control = radeon_get_ib_value(p, idx); break; @@ -869,6 +874,54 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx case SQ_PGM_START_VS: case SQ_PGM_START_GS: case SQ_PGM_START_PS: + case SQ_ALU_CONST_CACHE_GS_0: + case SQ_ALU_CONST_CACHE_GS_1: + case SQ_ALU_CONST_CACHE_GS_2: + case SQ_ALU_CONST_CACHE_GS_3: + case SQ_ALU_CONST_CACHE_GS_4: + case SQ_ALU_CONST_CACHE_GS_5: + case SQ_ALU_CONST_CACHE_GS_6: + case SQ_ALU_CONST_CACHE_GS_7: + case SQ_ALU_CONST_CACHE_GS_8: + case SQ_ALU_CONST_CACHE_GS_9: + case SQ_ALU_CONST_CACHE_GS_10: + case SQ_ALU_CONST_CACHE_GS_11: + case SQ_ALU_CONST_CACHE_GS_12: + case SQ_ALU_CONST_CACHE_GS_13: + case SQ_ALU_CONST_CACHE_GS_14: + case SQ_ALU_CONST_CACHE_GS_15: + case SQ_ALU_CONST_CACHE_PS_0: + case SQ_ALU_CONST_CACHE_PS_1: + case SQ_ALU_CONST_CACHE_PS_2: + case SQ_ALU_CONST_CACHE_PS_3: + case SQ_ALU_CONST_CACHE_PS_4: + case SQ_ALU_CONST_CACHE_PS_5: + case SQ_ALU_CONST_CACHE_PS_6: + case SQ_ALU_CONST_CACHE_PS_7: + case SQ_ALU_CONST_CACHE_PS_8: + case SQ_ALU_CONST_CACHE_PS_9: + case SQ_ALU_CONST_CACHE_PS_10: + case SQ_ALU_CONST_CACHE_PS_11: + case SQ_ALU_CONST_CACHE_PS_12: + case 
SQ_ALU_CONST_CACHE_PS_13: + case SQ_ALU_CONST_CACHE_PS_14: + case SQ_ALU_CONST_CACHE_PS_15: + case SQ_ALU_CONST_CACHE_VS_0: + case SQ_ALU_CONST_CACHE_VS_1: + case SQ_ALU_CONST_CACHE_VS_2: + case SQ_ALU_CONST_CACHE_VS_3: + case SQ_ALU_CONST_CACHE_VS_4: + case SQ_ALU_CONST_CACHE_VS_5: + case SQ_ALU_CONST_CACHE_VS_6: + case SQ_ALU_CONST_CACHE_VS_7: + case SQ_ALU_CONST_CACHE_VS_8: + case SQ_ALU_CONST_CACHE_VS_9: + case SQ_ALU_CONST_CACHE_VS_10: + case SQ_ALU_CONST_CACHE_VS_11: + case SQ_ALU_CONST_CACHE_VS_12: + case SQ_ALU_CONST_CACHE_VS_13: + case SQ_ALU_CONST_CACHE_VS_14: + case SQ_ALU_CONST_CACHE_VS_15: r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " @@ -1226,13 +1279,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, } break; case PACKET3_SET_ALU_CONST: - start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; - end_reg = 4 * pkt->count + start_reg - 4; - if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || - (start_reg >= PACKET3_SET_ALU_CONST_END) || - (end_reg >= PACKET3_SET_ALU_CONST_END)) { - DRM_ERROR("bad SET_ALU_CONST\n"); - return -EINVAL; + if (track->sq_config & DX9_CONSTS) { + start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; + end_reg = 4 * pkt->count + start_reg - 4; + if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || + (start_reg >= PACKET3_SET_ALU_CONST_END) || + (end_reg >= PACKET3_SET_ALU_CONST_END)) { + DRM_ERROR("bad SET_ALU_CONST\n"); + return -EINVAL; + } } break; case PACKET3_SET_BOOL_CONST: diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index fcc949df0e5d..26b4bc9d89a5 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -42,13 +42,13 @@ enum r600_hdmi_color_format { */ enum r600_hdmi_iec_status_bits { AUDIO_STATUS_DIG_ENABLE = 0x01, - AUDIO_STATUS_V = 0x02, - AUDIO_STATUS_VCFG = 0x04, + AUDIO_STATUS_V = 0x02, + AUDIO_STATUS_VCFG = 0x04, AUDIO_STATUS_EMPHASIS = 0x08, AUDIO_STATUS_COPYRIGHT = 0x10, AUDIO_STATUS_NONAUDIO = 0x20, AUDIO_STATUS_PROFESSIONAL = 0x40, - AUDIO_STATUS_LEVEL = 0x80 + AUDIO_STATUS_LEVEL = 0x80 }; struct { @@ -85,7 +85,7 @@ struct { static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) { if (*CTS == 0) - *CTS = clock*N/(128*freq)*1000; + *CTS = clock * N / (128 * freq) * 1000; DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", N, *CTS, freq); } @@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType, uint8_t length, uint8_t *frame) { - int i; - frame[0] = packetType + versionNumber + length; - for (i = 1; i <= length; i++) - frame[0] += frame[i]; - frame[0] = 0x100 - frame[0]; + int i; + frame[0] = packetType + versionNumber + length; + for (i = 1; i <= length; i++) + frame[0] += frame[i]; + frame[0] = 0x100 - frame[0]; } /* @@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder) if (!offset) return; - if (r600_hdmi_is_audio_buffer_filled(encoder)) { - /* disable audio workaround and start delivering of audio frames */ - WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001); + if (!radeon_encoder->hdmi_audio_workaround || + r600_hdmi_is_audio_buffer_filled(encoder)) { - } else if (radeon_encoder->hdmi_audio_workaround) { - /* enable audio workaround and start delivering of audio frames */ - WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001); + /* disable audio workaround */ + WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001); } else { - /* disable audio workaround and stop delivering of audio frames */ - 
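The r600_hdmi_calc_CTS() hunk earlier in this file is only a whitespace cleanup, but the math behind it is the HDMI audio clock regeneration relationship, CTS = f_TMDS * N / (128 * f_s); the driver passes the clock in kHz, hence its trailing * 1000. Below is a self-contained version that widens to 64 bits and divides last, checked against the common 74.25 MHz, 48 kHz, N = 6144 case, which works out to the textbook CTS of 74250.

#include <stdint.h>
#include <stdio.h>

/* CTS = f_TMDS * N / (128 * f_s), computed in 64 bits with the
 * division done last so nothing is truncated along the way */
static uint32_t hdmi_calc_cts(uint64_t tmds_clock_hz, uint32_t n, uint32_t fs)
{
    return (uint32_t)(tmds_clock_hz * n / (128ull * fs));
}

int main(void)
{
    /* N = 6144 is the HDMI-recommended value for 48 kHz audio */
    printf("CTS = %u\n", hdmi_calc_cts(74250000ull, 6144, 48000));
    return 0;
}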
WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001); + /* enable audio workaround */ + WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001); } } @@ -314,6 +312,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod struct radeon_device *rdev = dev->dev_private; uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + if (ASIC_IS_DCE4(rdev)) + return; + if (!offset) return; @@ -342,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod /* audio packets per line, does anyone know how to calc this ? */ WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000); - - /* update? reset? don't realy know */ - WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000); } /* * update settings with current parameters from audio engine */ -void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, - int channels, - int rate, - int bps, - uint8_t status_bits, - uint8_t category_code) +void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + int channels = r600_audio_channels(rdev); + int rate = r600_audio_rate(rdev); + int bps = r600_audio_bits_per_sample(rdev); + uint8_t status_bits = r600_audio_status_bits(rdev); + uint8_t category_code = r600_audio_category_code(rdev); + uint32_t iec; if (!offset) @@ -412,95 +411,173 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0); r600_hdmi_audio_workaround(encoder); +} - /* update? reset? don't realy know */ - WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); +static int r600_hdmi_find_free_block(struct drm_device *dev) +{ + struct radeon_device *rdev = dev->dev_private; + struct drm_encoder *encoder; + struct radeon_encoder *radeon_encoder; + bool free_blocks[3] = { true, true, true }; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + radeon_encoder = to_radeon_encoder(encoder); + switch (radeon_encoder->hdmi_offset) { + case R600_HDMI_BLOCK1: + free_blocks[0] = false; + break; + case R600_HDMI_BLOCK2: + free_blocks[1] = false; + break; + case R600_HDMI_BLOCK3: + free_blocks[2] = false; + break; + } + } + + if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) { + return free_blocks[0] ? R600_HDMI_BLOCK1 : 0; + } else if (rdev->family >= CHIP_R600) { + if (free_blocks[0]) + return R600_HDMI_BLOCK1; + else if (free_blocks[1]) + return R600_HDMI_BLOCK2; + } + return 0; } -/* - * enable/disable the HDMI engine - */ -void r600_hdmi_enable(struct drm_encoder *encoder, int enable) +static void r600_hdmi_assign_block(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (!offset) + if (!dig) { + dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n"); return; + } - DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset); - - /* some version of atombios ignore the enable HDMI flag - * so enabling/disabling HDMI was moved here for TMDS1+2 */ - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - WREG32_P(AVIVO_TMDSA_CNTL, enable ? 
0x4 : 0x0, ~0x4); - WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0); - break; - - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4); - WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0); - break; - - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - /* This part is doubtfull in my opinion */ - WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0); - break; - - default: - DRM_ERROR("unknown HDMI output type\n"); - break; + if (ASIC_IS_DCE4(rdev)) { + /* TODO */ + } else if (ASIC_IS_DCE3(rdev)) { + radeon_encoder->hdmi_offset = dig->dig_encoder ? + R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1; + if (ASIC_IS_DCE32(rdev)) + radeon_encoder->hdmi_config_offset = dig->dig_encoder ? + R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1; + } else if (rdev->family >= CHIP_R600) { + radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev); } } /* - * determin at which register offset the HDMI encoder is + * enable the HDMI engine */ -void r600_hdmi_init(struct drm_encoder *encoder) +void r600_hdmi_enable(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint32_t offset; - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; - break; - - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - switch (r600_audio_tmds_index(encoder)) { - case 0: - radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; + if (ASIC_IS_DCE4(rdev)) + return; + + if (!radeon_encoder->hdmi_offset) { + r600_hdmi_assign_block(encoder); + if (!radeon_encoder->hdmi_offset) { + dev_warn(rdev->dev, "Could not find HDMI block for " + "0x%x encoder\n", radeon_encoder->encoder_id); + return; + } + } + + offset = radeon_encoder->hdmi_offset; + if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { + WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); + } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); + WREG32(offset + R600_HDMI_ENABLE, 0x101); break; - case 1: - radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4); + WREG32(offset + R600_HDMI_ENABLE, 0x105); break; default: - radeon_encoder->hdmi_offset = 0; + dev_err(rdev->dev, "Unknown HDMI output type\n"); break; } - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; - break; + } + + if (rdev->irq.installed + && rdev->family != CHIP_RS600 + && rdev->family != CHIP_RS690 + && rdev->family != CHIP_RS740) { + + /* if irq is available use it */ + rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 
0 : 1] = true; + radeon_irq_set(rdev); + + r600_audio_disable_polling(encoder); + } else { + /* if not fallback to polling */ + r600_audio_enable_polling(encoder); + } + + DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", + radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); +} + +/* + * disable the HDMI engine + */ +void r600_hdmi_disable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint32_t offset; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - radeon_encoder->hdmi_offset = R600_HDMI_DIG; - break; + if (ASIC_IS_DCE4(rdev)) + return; - default: - radeon_encoder->hdmi_offset = 0; - break; + offset = radeon_encoder->hdmi_offset; + if (!offset) { + dev_err(rdev->dev, "Disabling not enabled HDMI\n"); + return; } - DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n", - radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); + DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n", + offset, radeon_encoder->encoder_id); + + /* disable irq */ + rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false; + radeon_irq_set(rdev); + + /* disable polling */ + r600_audio_disable_polling(encoder); + + if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { + WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1); + } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4); + WREG32(offset + R600_HDMI_ENABLE, 0); + break; + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4); + WREG32(offset + R600_HDMI_ENABLE, 0); + break; + default: + dev_err(rdev->dev, "Unknown HDMI output type\n"); + break; + } + } - /* TODO: make this configureable */ - radeon_encoder->hdmi_audio_workaround = 0; + radeon_encoder->hdmi_offset = 0; + radeon_encoder->hdmi_config_offset = 0; } diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d0e28ffdeda9..d84612ae47e0 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h @@ -152,37 +152,44 @@ #define R600_AUDIO_STATUS_BITS 0x73d8 /* HDMI base register addresses */ -#define R600_HDMI_TMDS1 0x7400 -#define R600_HDMI_TMDS2 0x7700 -#define R600_HDMI_DIG 0x7800 +#define R600_HDMI_BLOCK1 0x7400 +#define R600_HDMI_BLOCK2 0x7700 +#define R600_HDMI_BLOCK3 0x7800 /* HDMI registers */ -#define R600_HDMI_ENABLE 0x00 -#define R600_HDMI_STATUS 0x04 -#define R600_HDMI_CNTL 0x08 -#define R600_HDMI_UNKNOWN_0 0x0C -#define R600_HDMI_AUDIOCNTL 0x10 -#define R600_HDMI_VIDEOCNTL 0x14 -#define R600_HDMI_VERSION 0x18 -#define R600_HDMI_UNKNOWN_1 0x28 -#define R600_HDMI_VIDEOINFOFRAME_0 0x54 -#define R600_HDMI_VIDEOINFOFRAME_1 0x58 -#define R600_HDMI_VIDEOINFOFRAME_2 0x5c -#define R600_HDMI_VIDEOINFOFRAME_3 0x60 -#define R600_HDMI_32kHz_CTS 0xac -#define R600_HDMI_32kHz_N 0xb0 -#define R600_HDMI_44_1kHz_CTS 0xb4 -#define R600_HDMI_44_1kHz_N 0xb8 -#define R600_HDMI_48kHz_CTS 0xbc -#define R600_HDMI_48kHz_N 0xc0 -#define R600_HDMI_AUDIOINFOFRAME_0 0xcc -#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 -#define R600_HDMI_IEC60958_1 0xd4 -#define R600_HDMI_IEC60958_2 0xd8 -#define R600_HDMI_UNKNOWN_2 0xdc -#define R600_HDMI_AUDIO_DEBUG_0 0xe0 -#define R600_HDMI_AUDIO_DEBUG_1 0xe4 -#define R600_HDMI_AUDIO_DEBUG_2 0xe8 -#define R600_HDMI_AUDIO_DEBUG_3 0xec +#define R600_HDMI_ENABLE 0x00 +#define 
R600_HDMI_STATUS 0x04 +# define R600_HDMI_INT_PENDING (1 << 29) +#define R600_HDMI_CNTL 0x08 +# define R600_HDMI_INT_EN (1 << 28) +# define R600_HDMI_INT_ACK (1 << 29) +#define R600_HDMI_UNKNOWN_0 0x0C +#define R600_HDMI_AUDIOCNTL 0x10 +#define R600_HDMI_VIDEOCNTL 0x14 +#define R600_HDMI_VERSION 0x18 +#define R600_HDMI_UNKNOWN_1 0x28 +#define R600_HDMI_VIDEOINFOFRAME_0 0x54 +#define R600_HDMI_VIDEOINFOFRAME_1 0x58 +#define R600_HDMI_VIDEOINFOFRAME_2 0x5c +#define R600_HDMI_VIDEOINFOFRAME_3 0x60 +#define R600_HDMI_32kHz_CTS 0xac +#define R600_HDMI_32kHz_N 0xb0 +#define R600_HDMI_44_1kHz_CTS 0xb4 +#define R600_HDMI_44_1kHz_N 0xb8 +#define R600_HDMI_48kHz_CTS 0xbc +#define R600_HDMI_48kHz_N 0xc0 +#define R600_HDMI_AUDIOINFOFRAME_0 0xcc +#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 +#define R600_HDMI_IEC60958_1 0xd4 +#define R600_HDMI_IEC60958_2 0xd8 +#define R600_HDMI_UNKNOWN_2 0xdc +#define R600_HDMI_AUDIO_DEBUG_0 0xe0 +#define R600_HDMI_AUDIO_DEBUG_1 0xe4 +#define R600_HDMI_AUDIO_DEBUG_2 0xe8 +#define R600_HDMI_AUDIO_DEBUG_3 0xec + +/* HDMI additional config base register addresses */ +#define R600_HDMI_CONFIG1 0x7600 +#define R600_HDMI_CONFIG2 0x7a00 #endif diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 5b2e4d442823..59c1f8793e60 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -77,6 +77,55 @@ #define CB_COLOR0_FRAG 0x280e0 #define CB_COLOR0_MASK 0x28100 +#define SQ_ALU_CONST_CACHE_PS_0 0x28940 +#define SQ_ALU_CONST_CACHE_PS_1 0x28944 +#define SQ_ALU_CONST_CACHE_PS_2 0x28948 +#define SQ_ALU_CONST_CACHE_PS_3 0x2894c +#define SQ_ALU_CONST_CACHE_PS_4 0x28950 +#define SQ_ALU_CONST_CACHE_PS_5 0x28954 +#define SQ_ALU_CONST_CACHE_PS_6 0x28958 +#define SQ_ALU_CONST_CACHE_PS_7 0x2895c +#define SQ_ALU_CONST_CACHE_PS_8 0x28960 +#define SQ_ALU_CONST_CACHE_PS_9 0x28964 +#define SQ_ALU_CONST_CACHE_PS_10 0x28968 +#define SQ_ALU_CONST_CACHE_PS_11 0x2896c +#define SQ_ALU_CONST_CACHE_PS_12 0x28970 +#define SQ_ALU_CONST_CACHE_PS_13 0x28974 +#define SQ_ALU_CONST_CACHE_PS_14 0x28978 +#define SQ_ALU_CONST_CACHE_PS_15 0x2897c +#define SQ_ALU_CONST_CACHE_VS_0 0x28980 +#define SQ_ALU_CONST_CACHE_VS_1 0x28984 +#define SQ_ALU_CONST_CACHE_VS_2 0x28988 +#define SQ_ALU_CONST_CACHE_VS_3 0x2898c +#define SQ_ALU_CONST_CACHE_VS_4 0x28990 +#define SQ_ALU_CONST_CACHE_VS_5 0x28994 +#define SQ_ALU_CONST_CACHE_VS_6 0x28998 +#define SQ_ALU_CONST_CACHE_VS_7 0x2899c +#define SQ_ALU_CONST_CACHE_VS_8 0x289a0 +#define SQ_ALU_CONST_CACHE_VS_9 0x289a4 +#define SQ_ALU_CONST_CACHE_VS_10 0x289a8 +#define SQ_ALU_CONST_CACHE_VS_11 0x289ac +#define SQ_ALU_CONST_CACHE_VS_12 0x289b0 +#define SQ_ALU_CONST_CACHE_VS_13 0x289b4 +#define SQ_ALU_CONST_CACHE_VS_14 0x289b8 +#define SQ_ALU_CONST_CACHE_VS_15 0x289bc +#define SQ_ALU_CONST_CACHE_GS_0 0x289c0 +#define SQ_ALU_CONST_CACHE_GS_1 0x289c4 +#define SQ_ALU_CONST_CACHE_GS_2 0x289c8 +#define SQ_ALU_CONST_CACHE_GS_3 0x289cc +#define SQ_ALU_CONST_CACHE_GS_4 0x289d0 +#define SQ_ALU_CONST_CACHE_GS_5 0x289d4 +#define SQ_ALU_CONST_CACHE_GS_6 0x289d8 +#define SQ_ALU_CONST_CACHE_GS_7 0x289dc +#define SQ_ALU_CONST_CACHE_GS_8 0x289e0 +#define SQ_ALU_CONST_CACHE_GS_9 0x289e4 +#define SQ_ALU_CONST_CACHE_GS_10 0x289e8 +#define SQ_ALU_CONST_CACHE_GS_11 0x289ec +#define SQ_ALU_CONST_CACHE_GS_12 0x289f0 +#define SQ_ALU_CONST_CACHE_GS_13 0x289f4 +#define SQ_ALU_CONST_CACHE_GS_14 0x289f8 +#define SQ_ALU_CONST_CACHE_GS_15 0x289fc + #define CONFIG_MEMSIZE 0x5428 #define CONFIG_CNTL 0x5424 #define CP_STAT 0x8680 diff --git 
a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 829e26e8a4bb..2f94dc66c183 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -89,14 +89,16 @@ extern int radeon_testing; extern int radeon_connector_table; extern int radeon_tv; extern int radeon_new_pll; -extern int radeon_dynpm; extern int radeon_audio; +extern int radeon_disp_priority; +extern int radeon_hw_i2c; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting * symbol; */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ +#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) /* RADEON_IB_POOL_SIZE must be a power of 2 */ #define RADEON_IB_POOL_SIZE 16 #define RADEON_DEBUGFS_MAX_NUM_FILES 32 @@ -168,9 +170,14 @@ struct radeon_clock { * Power management */ int radeon_pm_init(struct radeon_device *rdev); +void radeon_pm_fini(struct radeon_device *rdev); void radeon_pm_compute_clocks(struct radeon_device *rdev); +void radeon_pm_suspend(struct radeon_device *rdev); +void radeon_pm_resume(struct radeon_device *rdev); void radeon_combios_get_power_modes(struct radeon_device *rdev); void radeon_atombios_get_power_modes(struct radeon_device *rdev); +void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); +void rs690_pm_info(struct radeon_device *rdev); /* * Fences. @@ -179,7 +186,8 @@ struct radeon_fence_driver { uint32_t scratch_reg; atomic_t seq; uint32_t last_seq; - unsigned long count_timeout; + unsigned long last_jiffies; + unsigned long last_timeout; wait_queue_head_t queue; rwlock_t lock; struct list_head created; @@ -194,7 +202,6 @@ struct radeon_fence { struct list_head list; /* protected by radeon_fence.lock */ uint32_t seq; - unsigned long timeout; bool emited; bool signaled; }; @@ -256,6 +263,7 @@ struct radeon_bo_list { unsigned rdomain; unsigned wdomain; u32 tiling_flags; + bool reserved; }; /* @@ -343,6 +351,7 @@ struct radeon_mc { int vram_mtrr; bool vram_is_ddr; bool igp_sideport_enabled; + u64 gtt_base_align; }; bool radeon_combios_sideport_present(struct radeon_device *rdev); @@ -368,10 +377,15 @@ struct radeon_irq { bool installed; bool sw_int; /* FIXME: use a define max crtc rather than hardcode it */ - bool crtc_vblank_int[2]; + bool crtc_vblank_int[6]; wait_queue_head_t vblank_queue; /* FIXME: use defines for max hpd/dacs */ bool hpd[6]; + bool gui_idle; + bool gui_idle_acked; + wait_queue_head_t idle_queue; + /* FIXME: use defines for max HDMI blocks */ + bool hdmi[2]; spinlock_t sw_lock; int sw_refcount; }; @@ -459,7 +473,9 @@ int radeon_ib_test(struct radeon_device *rdev); extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); /* Ring access between begin & end cannot sleep */ void radeon_ring_free_size(struct radeon_device *rdev); +int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); +void radeon_ring_commit(struct radeon_device *rdev); void radeon_ring_unlock_commit(struct radeon_device *rdev); void radeon_ring_unlock_undo(struct radeon_device *rdev); int radeon_ring_test(struct radeon_device *rdev); @@ -563,6 +579,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, */ int radeon_agp_init(struct radeon_device *rdev); void radeon_agp_resume(struct radeon_device *rdev); +void radeon_agp_suspend(struct radeon_device *rdev); void radeon_agp_fini(struct radeon_device *rdev); @@ -594,17 +611,25 @@ struct radeon_wb { * Equation between gpu/memory clock and available bandwidth is hw dependent * (type of 
memory, bus size, efficiency, ...) */ -enum radeon_pm_state { - PM_STATE_DISABLED, - PM_STATE_MINIMUM, - PM_STATE_PAUSED, - PM_STATE_ACTIVE + +enum radeon_pm_method { + PM_METHOD_PROFILE, + PM_METHOD_DYNPM, +}; + +enum radeon_dynpm_state { + DYNPM_STATE_DISABLED, + DYNPM_STATE_MINIMUM, + DYNPM_STATE_PAUSED, + DYNPM_STATE_ACTIVE, + DYNPM_STATE_SUSPENDED, }; -enum radeon_pm_action { - PM_ACTION_NONE, - PM_ACTION_MINIMUM, - PM_ACTION_DOWNCLOCK, - PM_ACTION_UPCLOCK +enum radeon_dynpm_action { + DYNPM_ACTION_NONE, + DYNPM_ACTION_MINIMUM, + DYNPM_ACTION_DOWNCLOCK, + DYNPM_ACTION_UPCLOCK, + DYNPM_ACTION_DEFAULT }; enum radeon_voltage_type { @@ -622,11 +647,28 @@ enum radeon_pm_state_type { POWER_STATE_TYPE_PERFORMANCE, }; -enum radeon_pm_clock_mode_type { - POWER_MODE_TYPE_DEFAULT, - POWER_MODE_TYPE_LOW, - POWER_MODE_TYPE_MID, - POWER_MODE_TYPE_HIGH, +enum radeon_pm_profile_type { + PM_PROFILE_DEFAULT, + PM_PROFILE_AUTO, + PM_PROFILE_LOW, + PM_PROFILE_MID, + PM_PROFILE_HIGH, +}; + +#define PM_PROFILE_DEFAULT_IDX 0 +#define PM_PROFILE_LOW_SH_IDX 1 +#define PM_PROFILE_MID_SH_IDX 2 +#define PM_PROFILE_HIGH_SH_IDX 3 +#define PM_PROFILE_LOW_MH_IDX 4 +#define PM_PROFILE_MID_MH_IDX 5 +#define PM_PROFILE_HIGH_MH_IDX 6 +#define PM_PROFILE_MAX 7 + +struct radeon_pm_profile { + int dpms_off_ps_idx; + int dpms_on_ps_idx; + int dpms_off_cm_idx; + int dpms_on_cm_idx; }; struct radeon_voltage { @@ -643,12 +685,8 @@ struct radeon_voltage { u32 voltage; }; -struct radeon_pm_non_clock_info { - /* pcie lanes */ - int pcie_lanes; - /* standardized non-clock flags */ - u32 flags; -}; +/* clock mode flags */ +#define RADEON_PM_MODE_NO_DISPLAY (1 << 0) struct radeon_pm_clock_info { /* memory clock */ @@ -657,10 +695,13 @@ struct radeon_pm_clock_info { u32 sclk; /* voltage info */ struct radeon_voltage voltage; - /* standardized clock flags - not sure we'll need these */ + /* standardized clock flags */ u32 flags; }; +/* state flags */ +#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0) + struct radeon_power_state { enum radeon_pm_state_type type; /* XXX: use a define for num clock modes */ @@ -668,9 +709,11 @@ struct radeon_power_state { /* number of valid clock modes in this power state */ int num_clock_modes; struct radeon_pm_clock_info *default_clock_mode; - /* non clock info about this state */ - struct radeon_pm_non_clock_info non_clock_info; - bool voltage_drop_active; + /* standardized state flags */ + u32 flags; + u32 misc; /* vbios specific flags */ + u32 misc2; /* vbios specific flags */ + int pcie_lanes; /* pcie lanes */ }; /* @@ -680,13 +723,11 @@ struct radeon_power_state { struct radeon_pm { struct mutex mutex; - struct delayed_work idle_work; - enum radeon_pm_state state; - enum radeon_pm_action planned_action; - unsigned long action_timeout; - bool downclocked; - int active_crtcs; + u32 active_crtcs; + int active_crtc_count; int req_vblank; + bool vblank_sync; + bool gui_idle; fixed20_12 max_bandwidth; fixed20_12 igp_sideport_mclk; fixed20_12 igp_system_mclk; @@ -697,16 +738,34 @@ struct radeon_pm { fixed20_12 ht_bandwidth; fixed20_12 core_bandwidth; fixed20_12 sclk; + fixed20_12 mclk; fixed20_12 needed_bandwidth; /* XXX: use a define for num power modes */ struct radeon_power_state power_state[8]; /* number of valid power states */ int num_power_states; - struct radeon_power_state *current_power_state; - struct radeon_pm_clock_info *current_clock_mode; - struct radeon_power_state *requested_power_state; - struct radeon_pm_clock_info *requested_clock_mode; - struct radeon_power_state 
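The seven PM_PROFILE_*_IDX slots above pair each requested profile with a single-head (SH) and a multi-head (MH) variant. A sketch of the plausible index selection, using the enum and defines introduced by this patch; resolving PM_PROFILE_AUTO (e.g. from the power source) is folded into the default case here for brevity, and the helper name is hypothetical:

	#include <linux/types.h>

	/* Assumes enum radeon_pm_profile_type and the PM_PROFILE_*_IDX
	 * definitions from this patch are in scope. */
	static int pm_pick_profile_index(enum radeon_pm_profile_type profile,
					 int active_crtc_count)
	{
		bool mh = active_crtc_count > 1;	/* multi-head config? */

		switch (profile) {
		case PM_PROFILE_LOW:
			return mh ? PM_PROFILE_LOW_MH_IDX : PM_PROFILE_LOW_SH_IDX;
		case PM_PROFILE_MID:
			return mh ? PM_PROFILE_MID_MH_IDX : PM_PROFILE_MID_SH_IDX;
		case PM_PROFILE_HIGH:
			return mh ? PM_PROFILE_HIGH_MH_IDX : PM_PROFILE_HIGH_SH_IDX;
		default:
			return PM_PROFILE_DEFAULT_IDX;
		}
	}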
*default_power_state; + int current_power_state_index; + int current_clock_mode_index; + int requested_power_state_index; + int requested_clock_mode_index; + int default_power_state_index; + u32 current_sclk; + u32 current_mclk; + u32 current_vddc; + struct radeon_i2c_chan *i2c_bus; + /* selected pm method */ + enum radeon_pm_method pm_method; + /* dynpm power management */ + struct delayed_work dynpm_idle_work; + enum radeon_dynpm_state dynpm_state; + enum radeon_dynpm_action dynpm_planned_action; + unsigned long dynpm_action_timeout; + bool dynpm_can_upclock; + bool dynpm_can_downclock; + /* profile-based power management */ + enum radeon_pm_profile_type profile; + int profile_index; + struct radeon_pm_profile profiles[PM_PROFILE_MAX]; }; @@ -729,8 +788,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev, struct drm_info_list *files, unsigned nfiles); int radeon_debugfs_fence_init(struct radeon_device *rdev); -int r100_debugfs_rbbm_init(struct radeon_device *rdev); -int r100_debugfs_cp_init(struct radeon_device *rdev); /* @@ -742,7 +799,8 @@ struct radeon_asic { int (*resume)(struct radeon_device *rdev); int (*suspend)(struct radeon_device *rdev); void (*vga_set_state)(struct radeon_device *rdev, bool state); - int (*gpu_reset)(struct radeon_device *rdev); + bool (*gpu_is_lockup)(struct radeon_device *rdev); + int (*asic_reset)(struct radeon_device *rdev); void (*gart_tlb_flush)(struct radeon_device *rdev); int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); @@ -782,7 +840,7 @@ struct radeon_asic { int (*set_surface_reg)(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); - int (*clear_surface_reg)(struct radeon_device *rdev, int reg); + void (*clear_surface_reg)(struct radeon_device *rdev, int reg); void (*bandwidth_update)(struct radeon_device *rdev); void (*hpd_init)(struct radeon_device *rdev); void (*hpd_fini)(struct radeon_device *rdev); @@ -795,44 +853,84 @@ struct radeon_asic { * through ring. 
*/ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); + bool (*gui_idle)(struct radeon_device *rdev); + /* power management */ + void (*pm_misc)(struct radeon_device *rdev); + void (*pm_prepare)(struct radeon_device *rdev); + void (*pm_finish)(struct radeon_device *rdev); + void (*pm_init_profile)(struct radeon_device *rdev); + void (*pm_get_dynpm_state)(struct radeon_device *rdev); }; /* * Asic structures */ +struct r100_gpu_lockup { + unsigned long last_jiffies; + u32 last_cp_rptr; +}; + struct r100_asic { - const unsigned *reg_safe_bm; - unsigned reg_safe_bm_size; - u32 hdp_cntl; + const unsigned *reg_safe_bm; + unsigned reg_safe_bm_size; + u32 hdp_cntl; + struct r100_gpu_lockup lockup; }; struct r300_asic { - const unsigned *reg_safe_bm; - unsigned reg_safe_bm_size; - u32 resync_scratch; - u32 hdp_cntl; + const unsigned *reg_safe_bm; + unsigned reg_safe_bm_size; + u32 resync_scratch; + u32 hdp_cntl; + struct r100_gpu_lockup lockup; }; struct r600_asic { - unsigned max_pipes; - unsigned max_tile_pipes; - unsigned max_simds; - unsigned max_backends; - unsigned max_gprs; - unsigned max_threads; - unsigned max_stack_entries; - unsigned max_hw_contexts; - unsigned max_gs_threads; - unsigned sx_max_export_size; - unsigned sx_max_export_pos_size; - unsigned sx_max_export_smx_size; - unsigned sq_num_cf_insts; - unsigned tiling_nbanks; - unsigned tiling_npipes; - unsigned tiling_group_size; + unsigned max_pipes; + unsigned max_tile_pipes; + unsigned max_simds; + unsigned max_backends; + unsigned max_gprs; + unsigned max_threads; + unsigned max_stack_entries; + unsigned max_hw_contexts; + unsigned max_gs_threads; + unsigned sx_max_export_size; + unsigned sx_max_export_pos_size; + unsigned sx_max_export_smx_size; + unsigned sq_num_cf_insts; + unsigned tiling_nbanks; + unsigned tiling_npipes; + unsigned tiling_group_size; + struct r100_gpu_lockup lockup; }; struct rv770_asic { + unsigned max_pipes; + unsigned max_tile_pipes; + unsigned max_simds; + unsigned max_backends; + unsigned max_gprs; + unsigned max_threads; + unsigned max_stack_entries; + unsigned max_hw_contexts; + unsigned max_gs_threads; + unsigned sx_max_export_size; + unsigned sx_max_export_pos_size; + unsigned sx_max_export_smx_size; + unsigned sq_num_cf_insts; + unsigned sx_num_of_sets; + unsigned sc_prim_fifo_size; + unsigned sc_hiz_tile_fifo_size; + unsigned sc_earlyz_tile_fifo_fize; + unsigned tiling_nbanks; + unsigned tiling_npipes; + unsigned tiling_group_size; + struct r100_gpu_lockup lockup; +}; + +struct evergreen_asic { + unsigned num_ses; unsigned max_pipes; unsigned max_tile_pipes; unsigned max_simds; @@ -849,7 +947,7 @@ struct rv770_asic { unsigned sx_num_of_sets; unsigned sc_prim_fifo_size; unsigned sc_hiz_tile_fifo_size; - unsigned sc_earlyz_tile_fifo_fize; + unsigned sc_earlyz_tile_fifo_size; unsigned tiling_nbanks; unsigned tiling_npipes; unsigned tiling_group_size; @@ -860,8 +958,15 @@ union radeon_asic_config { struct r100_asic r100; struct r600_asic r600; struct rv770_asic rv770; + struct evergreen_asic evergreen; }; +/* + * asic initizalization from radeon_asic.c + */ +void radeon_agp_disable(struct radeon_device *rdev); +int radeon_asic_init(struct radeon_device *rdev); + /* * IOCTL. 
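The split of the old gpu_reset callback into gpu_is_lockup plus asic_reset above is backed by the new r100_gpu_lockup bookkeeping (last_jiffies, last_cp_rptr): detection is now separate from recovery. A hedged sketch of the detection heuristic, using the r100_gpu_lockup struct introduced above; the 10-second threshold and the helper name are illustrative assumptions:

	#include <linux/jiffies.h>
	#include <linux/types.h>

	/* Sample the CP read pointer: any forward motion proves the ring is
	 * draining, so only a pointer frozen for longer than the threshold
	 * is reported as a lockup. */
	static bool cp_is_lockup(struct r100_gpu_lockup *lockup, u32 cp_rptr)
	{
		unsigned long elapsed;

		if (cp_rptr != lockup->last_cp_rptr) {
			lockup->last_cp_rptr = cp_rptr;
			lockup->last_jiffies = jiffies;
			return false;
		}
		elapsed = jiffies_to_msecs(jiffies - lockup->last_jiffies);
		return elapsed >= 10000;	/* assumed ~10s of no progress */
	}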
@@ -917,9 +1022,6 @@ struct radeon_device { bool is_atom_bios; uint16_t bios_header_start; struct radeon_bo *stollen_vga_memory; - struct fb_info *fbdev_info; - struct radeon_bo *fbdev_rbo; - struct radeon_framebuffer *fbdev_rfb; /* Register mmio */ resource_size_t rmmio_base; resource_size_t rmmio_size; @@ -964,6 +1066,7 @@ struct radeon_device { struct work_struct hotplug_work; int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ + struct mutex vram_mutex; /* audio stuff */ struct timer_list audio_timer; @@ -974,6 +1077,7 @@ struct radeon_device { uint8_t audio_category_code; bool powered_down; + struct notifier_block acpi_nb; }; int radeon_device_init(struct radeon_device *rdev, @@ -1135,7 +1239,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) -#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) +#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev)) +#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) @@ -1163,15 +1268,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) +#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) +#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) +#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) +#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) +#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) +#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) /* Common functions */ /* AGP */ +extern int radeon_gpu_reset(struct radeon_device *rdev); extern void radeon_agp_disable(struct radeon_device *rdev); extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); extern void radeon_gart_restore(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev); extern bool radeon_card_posted(struct radeon_device *rdev); +extern void radeon_update_bandwidth_info(struct radeon_device *rdev); +extern void radeon_update_display_priority(struct radeon_device *rdev); extern bool radeon_boot_test_post_card(struct radeon_device *rdev); extern int radeon_clocks_init(struct radeon_device *rdev); extern void radeon_clocks_fini(struct radeon_device *rdev); @@ -1188,51 +1302,8 @@ extern int radeon_resume_kms(struct drm_device *dev); extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ -struct r100_mc_save { - u32 GENMO_WT; - u32 CRTC_EXT_CNTL; - u32 CRTC_GEN_CNTL; - u32 CRTC2_GEN_CNTL; - u32 CUR_OFFSET; - u32 CUR2_OFFSET; -}; -extern void r100_cp_disable(struct radeon_device *rdev); -extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); -extern void r100_cp_fini(struct radeon_device *rdev); -extern void 
r100_pci_gart_tlb_flush(struct radeon_device *rdev); -extern int r100_pci_gart_init(struct radeon_device *rdev); -extern void r100_pci_gart_fini(struct radeon_device *rdev); -extern int r100_pci_gart_enable(struct radeon_device *rdev); -extern void r100_pci_gart_disable(struct radeon_device *rdev); -extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); -extern int r100_debugfs_mc_info_init(struct radeon_device *rdev); -extern int r100_gui_wait_for_idle(struct radeon_device *rdev); -extern void r100_ib_fini(struct radeon_device *rdev); -extern int r100_ib_init(struct radeon_device *rdev); -extern void r100_irq_disable(struct radeon_device *rdev); -extern int r100_irq_set(struct radeon_device *rdev); -extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); -extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); -extern void r100_vram_init_sizes(struct radeon_device *rdev); -extern void r100_wb_disable(struct radeon_device *rdev); -extern void r100_wb_fini(struct radeon_device *rdev); -extern int r100_wb_init(struct radeon_device *rdev); -extern void r100_hdp_reset(struct radeon_device *rdev); -extern int r100_rb2d_reset(struct radeon_device *rdev); -extern int r100_cp_reset(struct radeon_device *rdev); -extern void r100_vga_render_disable(struct radeon_device *rdev); -extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - struct radeon_bo *robj); -extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - const unsigned *auth, unsigned n, - radeon_packet0_check_t check); -extern int r100_cs_packet_parse(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - unsigned idx); -extern void r100_enable_bm(struct radeon_device *rdev); -extern void r100_set_common_regs(struct radeon_device *rdev); +extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp); +extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp); /* rv200,rv250,rv280 */ extern void r200_set_safe_registers(struct radeon_device *rdev); @@ -1293,6 +1364,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); +extern int r600_cp_start(struct radeon_device *rdev); extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_cp_resume(struct radeon_device *rdev); extern void r600_cp_fini(struct radeon_device *rdev); @@ -1309,28 +1381,39 @@ extern void r600_scratch_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev); extern int r600_init_microcode(struct radeon_device *rdev); -extern int r600_gpu_reset(struct radeon_device *rdev); +extern int r600_asic_reset(struct radeon_device *rdev); /* r600 irq */ extern int r600_irq_init(struct radeon_device *rdev); extern void r600_irq_fini(struct radeon_device *rdev); extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_irq_set(struct radeon_device *rdev); extern void r600_irq_suspend(struct radeon_device *rdev); +extern void r600_disable_interrupts(struct radeon_device *rdev); +extern void r600_rlc_stop(struct radeon_device *rdev); /* r600 audio */ extern int 
r600_audio_init(struct radeon_device *rdev); extern int r600_audio_tmds_index(struct drm_encoder *encoder); extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); +extern int r600_audio_channels(struct radeon_device *rdev); +extern int r600_audio_bits_per_sample(struct radeon_device *rdev); +extern int r600_audio_rate(struct radeon_device *rdev); +extern uint8_t r600_audio_status_bits(struct radeon_device *rdev); +extern uint8_t r600_audio_category_code(struct radeon_device *rdev); +extern void r600_audio_schedule_polling(struct radeon_device *rdev); +extern void r600_audio_enable_polling(struct drm_encoder *encoder); +extern void r600_audio_disable_polling(struct drm_encoder *encoder); extern void r600_audio_fini(struct radeon_device *rdev); extern void r600_hdmi_init(struct drm_encoder *encoder); -extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable); +extern void r600_hdmi_enable(struct drm_encoder *encoder); +extern void r600_hdmi_disable(struct drm_encoder *encoder); extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); -extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, - int channels, - int rate, - int bps, - uint8_t status_bits, - uint8_t category_code); +extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); + +extern void r700_cp_stop(struct radeon_device *rdev); +extern void r700_cp_fini(struct radeon_device *rdev); +extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); +extern int evergreen_irq_set(struct radeon_device *rdev); /* evergreen */ struct evergreen_mc_save { diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index c4457791dff1..f40dfb77f9b1 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -134,12 +134,10 @@ int radeon_agp_init(struct radeon_device *rdev) int ret; /* Acquire AGP. */ - if (!rdev->ddev->agp->acquired) { - ret = drm_agp_acquire(rdev->ddev); - if (ret) { - DRM_ERROR("Unable to acquire AGP: %d\n", ret); - return ret; - } + ret = drm_agp_acquire(rdev->ddev); + if (ret) { + DRM_ERROR("Unable to acquire AGP: %d\n", ret); + return ret; } ret = drm_agp_info(rdev->ddev, &info); @@ -272,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev) } #endif } + +void radeon_agp_suspend(struct radeon_device *rdev) +{ + radeon_agp_fini(rdev); +} diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c new file mode 100644 index 000000000000..646f96f97c77 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -0,0 +1,877 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ + +#include <linux/console.h> +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/radeon_drm.h> +#include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> +#include "radeon_reg.h" +#include "radeon.h" +#include "radeon_asic.h" +#include "atom.h" + +/* + * Registers accessors functions. + */ +static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) +{ + DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); + BUG_ON(1); + return 0; +} + +static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) +{ + DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", + reg, v); + BUG_ON(1); +} + +static void radeon_register_accessor_init(struct radeon_device *rdev) +{ + rdev->mc_rreg = &radeon_invalid_rreg; + rdev->mc_wreg = &radeon_invalid_wreg; + rdev->pll_rreg = &radeon_invalid_rreg; + rdev->pll_wreg = &radeon_invalid_wreg; + rdev->pciep_rreg = &radeon_invalid_rreg; + rdev->pciep_wreg = &radeon_invalid_wreg; + + /* Don't change order as we are overridding accessor. */ + if (rdev->family < CHIP_RV515) { + rdev->pcie_reg_mask = 0xff; + } else { + rdev->pcie_reg_mask = 0x7ff; + } + /* FIXME: not sure here */ + if (rdev->family <= CHIP_R580) { + rdev->pll_rreg = &r100_pll_rreg; + rdev->pll_wreg = &r100_pll_wreg; + } + if (rdev->family >= CHIP_R420) { + rdev->mc_rreg = &r420_mc_rreg; + rdev->mc_wreg = &r420_mc_wreg; + } + if (rdev->family >= CHIP_RV515) { + rdev->mc_rreg = &rv515_mc_rreg; + rdev->mc_wreg = &rv515_mc_wreg; + } + if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { + rdev->mc_rreg = &rs400_mc_rreg; + rdev->mc_wreg = &rs400_mc_wreg; + } + if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { + rdev->mc_rreg = &rs690_mc_rreg; + rdev->mc_wreg = &rs690_mc_wreg; + } + if (rdev->family == CHIP_RS600) { + rdev->mc_rreg = &rs600_mc_rreg; + rdev->mc_wreg = &rs600_mc_wreg; + } + if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { + rdev->pciep_rreg = &r600_pciep_rreg; + rdev->pciep_wreg = &r600_pciep_wreg; + } +} + + +/* helper to disable agp */ +void radeon_agp_disable(struct radeon_device *rdev) +{ + rdev->flags &= ~RADEON_IS_AGP; + if (rdev->family >= CHIP_R600) { + DRM_INFO("Forcing AGP to PCIE mode\n"); + rdev->flags |= RADEON_IS_PCIE; + } else if (rdev->family >= CHIP_RV515 || + rdev->family == CHIP_RV380 || + rdev->family == CHIP_RV410 || + rdev->family == CHIP_R423) { + DRM_INFO("Forcing AGP to PCIE mode\n"); + rdev->flags |= RADEON_IS_PCIE; + rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; + } else { + DRM_INFO("Forcing AGP to PCI mode\n"); + rdev->flags |= RADEON_IS_PCI; + rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart_set_page = &r100_pci_gart_set_page; + } + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; +} + +/* + * ASIC + */ +static struct radeon_asic r100_asic = { + .init = &r100_init, + .fini = &r100_fini, + 
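radeon_register_accessor_init above seeds every accessor pointer with a trapping stub and then overrides it family by family, which is why its ordering comment matters: later, more specific checks must win over earlier generic ones. A minimal restatement of that fail-fast pattern under illustrative names (nothing here is taken from the patch beyond the idea):

	#include <linux/bug.h>
	#include <linux/types.h>

	struct regmap_ops {
		u32  (*rreg)(u32 reg);
		void (*wreg)(u32 reg, u32 v);
	};

	static u32 trap_rreg(u32 reg)
	{
		WARN(1, "unwired register read at 0x%04x\n", reg);
		return 0;
	}

	static void trap_wreg(u32 reg, u32 v)
	{
		WARN(1, "unwired register write at 0x%04x\n", reg);
	}

	static void regmap_init(struct regmap_ops *ops, bool new_mc_block)
	{
		ops->rreg = trap_rreg;	/* fail loudly by default */
		ops->wreg = trap_wreg;
		if (new_mc_block) {
			/* ops->rreg = newblock_rreg; ops->wreg = newblock_wreg; */
		}
	}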
.suspend = &r100_suspend, + .resume = &r100_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r100_gpu_is_lockup, + .asic_reset = &r100_asic_reset, + .gart_tlb_flush = &r100_pci_gart_tlb_flush, + .gart_set_page = &r100_pci_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r100_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r100_fence_ring_emit, + .cs_parse = &r100_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = NULL, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_legacy_get_engine_clock, + .set_engine_clock = &radeon_legacy_set_engine_clock, + .get_memory_clock = &radeon_legacy_get_memory_clock, + .set_memory_clock = NULL, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &r100_pm_misc, + .pm_prepare = &r100_pm_prepare, + .pm_finish = &r100_pm_finish, + .pm_init_profile = &r100_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic r200_asic = { + .init = &r100_init, + .fini = &r100_fini, + .suspend = &r100_suspend, + .resume = &r100_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r100_gpu_is_lockup, + .asic_reset = &r100_asic_reset, + .gart_tlb_flush = &r100_pci_gart_tlb_flush, + .gart_set_page = &r100_pci_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r100_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r100_fence_ring_emit, + .cs_parse = &r100_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_legacy_get_engine_clock, + .set_engine_clock = &radeon_legacy_set_engine_clock, + .get_memory_clock = &radeon_legacy_get_memory_clock, + .set_memory_clock = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &r100_pm_misc, + .pm_prepare = &r100_pm_prepare, + .pm_finish = &r100_pm_finish, + .pm_init_profile = &r100_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic r300_asic = { + .init = &r300_init, + .fini = &r300_fini, + .suspend = &r300_suspend, + .resume = &r300_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &r300_asic_reset, + .gart_tlb_flush = &r100_pci_gart_tlb_flush, + .gart_set_page = &r100_pci_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = 
&r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_legacy_get_engine_clock, + .set_engine_clock = &radeon_legacy_set_engine_clock, + .get_memory_clock = &radeon_legacy_get_memory_clock, + .set_memory_clock = NULL, + .get_pcie_lanes = &rv370_get_pcie_lanes, + .set_pcie_lanes = &rv370_set_pcie_lanes, + .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &r100_pm_misc, + .pm_prepare = &r100_pm_prepare, + .pm_finish = &r100_pm_finish, + .pm_init_profile = &r100_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic r300_asic_pcie = { + .init = &r300_init, + .fini = &r300_fini, + .suspend = &r300_suspend, + .resume = &r300_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &r300_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_legacy_get_engine_clock, + .set_engine_clock = &radeon_legacy_set_engine_clock, + .get_memory_clock = &radeon_legacy_get_memory_clock, + .set_memory_clock = NULL, + .set_pcie_lanes = &rv370_set_pcie_lanes, + .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &r100_pm_misc, + .pm_prepare = &r100_pm_prepare, + .pm_finish = &r100_pm_finish, + .pm_init_profile = &r100_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic r420_asic = { + .init = &r420_init, + .fini = &r420_fini, + .suspend = &r420_suspend, + .resume = &r420_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &r300_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = 
&radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, + .set_pcie_lanes = &rv370_set_pcie_lanes, + .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &r100_pm_misc, + .pm_prepare = &r100_pm_prepare, + .pm_finish = &r100_pm_finish, + .pm_init_profile = &r420_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic rs400_asic = { + .init = &rs400_init, + .fini = &rs400_fini, + .suspend = &rs400_suspend, + .resume = &rs400_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &r300_asic_reset, + .gart_tlb_flush = &rs400_gart_tlb_flush, + .gart_set_page = &rs400_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_legacy_get_engine_clock, + .set_engine_clock = &radeon_legacy_set_engine_clock, + .get_memory_clock = &radeon_legacy_get_memory_clock, + .set_memory_clock = NULL, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &r100_pm_misc, + .pm_prepare = &r100_pm_prepare, + .pm_finish = &r100_pm_finish, + .pm_init_profile = &r100_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic rs600_asic = { + .init = &rs600_init, + .fini = &rs600_fini, + .suspend = &rs600_suspend, + .resume = &rs600_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rs600_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &rs600_irq_set, + .irq_process = &rs600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = 
r100_clear_surface_reg, + .bandwidth_update = &rs600_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &rs600_pm_misc, + .pm_prepare = &rs600_pm_prepare, + .pm_finish = &rs600_pm_finish, + .pm_init_profile = &r420_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic rs690_asic = { + .init = &rs690_init, + .fini = &rs690_fini, + .suspend = &rs690_suspend, + .resume = &rs690_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rs400_gart_tlb_flush, + .gart_set_page = &rs400_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &rs600_irq_set, + .irq_process = &rs600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r200_copy_dma, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &rs690_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &rs600_pm_misc, + .pm_prepare = &rs600_pm_prepare, + .pm_finish = &rs600_pm_finish, + .pm_init_profile = &r420_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic rv515_asic = { + .init = &rv515_init, + .fini = &rv515_fini, + .suspend = &rv515_suspend, + .resume = &rv515_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &rv515_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &rs600_irq_set, + .irq_process = &rs600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, + .set_pcie_lanes = &rv370_set_pcie_lanes, + .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc 
= &rs600_pm_misc, + .pm_prepare = &rs600_pm_prepare, + .pm_finish = &rs600_pm_finish, + .pm_init_profile = &r420_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic r520_asic = { + .init = &r520_init, + .fini = &rv515_fini, + .suspend = &rv515_suspend, + .resume = &r520_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_is_lockup = &r300_gpu_is_lockup, + .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &rv515_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &rs600_irq_set, + .irq_process = &rs600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, + .set_pcie_lanes = &rv370_set_pcie_lanes, + .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, + .gui_idle = &r100_gui_idle, + .pm_misc = &rs600_pm_misc, + .pm_prepare = &rs600_pm_prepare, + .pm_finish = &rs600_pm_finish, + .pm_init_profile = &r420_pm_init_profile, + .pm_get_dynpm_state = &r100_pm_get_dynpm_state, +}; + +static struct radeon_asic r600_asic = { + .init = &r600_init, + .fini = &r600_fini, + .suspend = &r600_suspend, + .resume = &r600_resume, + .cp_commit = &r600_cp_commit, + .vga_set_state = &r600_vga_set_state, + .gpu_is_lockup = &r600_gpu_is_lockup, + .asic_reset = &r600_asic_reset, + .gart_tlb_flush = &r600_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, + .ring_ib_execute = &r600_ring_ib_execute, + .irq_set = &r600_irq_set, + .irq_process = &r600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &r600_cs_parse, + .copy_blit = &r600_copy_blit, + .copy_dma = &r600_copy_blit, + .copy = &r600_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, + .set_pcie_lanes = NULL, + .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &r600_hpd_init, + .hpd_fini = &r600_hpd_fini, + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, + .gui_idle = &r600_gui_idle, + .pm_misc = &r600_pm_misc, + .pm_prepare = &rs600_pm_prepare, + .pm_finish = &rs600_pm_finish, + .pm_init_profile = &r600_pm_init_profile, + .pm_get_dynpm_state = &r600_pm_get_dynpm_state, +}; + +static struct radeon_asic rs780_asic = { + .init = &r600_init, + .fini = &r600_fini, + .suspend = &r600_suspend, + .resume = 
&r600_resume, + .cp_commit = &r600_cp_commit, + .gpu_is_lockup = &r600_gpu_is_lockup, + .vga_set_state = &r600_vga_set_state, + .asic_reset = &r600_asic_reset, + .gart_tlb_flush = &r600_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, + .ring_ib_execute = &r600_ring_ib_execute, + .irq_set = &r600_irq_set, + .irq_process = &r600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &r600_cs_parse, + .copy_blit = &r600_copy_blit, + .copy_dma = &r600_copy_blit, + .copy = &r600_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = NULL, + .set_memory_clock = NULL, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &rs690_bandwidth_update, + .hpd_init = &r600_hpd_init, + .hpd_fini = &r600_hpd_fini, + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, + .gui_idle = &r600_gui_idle, + .pm_misc = &r600_pm_misc, + .pm_prepare = &rs600_pm_prepare, + .pm_finish = &rs600_pm_finish, + .pm_init_profile = &rs780_pm_init_profile, + .pm_get_dynpm_state = &r600_pm_get_dynpm_state, +}; + +static struct radeon_asic rv770_asic = { + .init = &rv770_init, + .fini = &rv770_fini, + .suspend = &rv770_suspend, + .resume = &rv770_resume, + .cp_commit = &r600_cp_commit, + .asic_reset = &r600_asic_reset, + .gpu_is_lockup = &r600_gpu_is_lockup, + .vga_set_state = &r600_vga_set_state, + .gart_tlb_flush = &r600_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, + .ring_ib_execute = &r600_ring_ib_execute, + .irq_set = &r600_irq_set, + .irq_process = &r600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &r600_cs_parse, + .copy_blit = &r600_copy_blit, + .copy_dma = &r600_copy_blit, + .copy = &r600_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, + .set_pcie_lanes = NULL, + .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &r600_hpd_init, + .hpd_fini = &r600_hpd_fini, + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, + .gui_idle = &r600_gui_idle, + .pm_misc = &rv770_pm_misc, + .pm_prepare = &rs600_pm_prepare, + .pm_finish = &rs600_pm_finish, + .pm_init_profile = &r600_pm_init_profile, + .pm_get_dynpm_state = &r600_pm_get_dynpm_state, +}; + +static struct radeon_asic evergreen_asic = { + .init = &evergreen_init, + .fini = &evergreen_fini, + .suspend = &evergreen_suspend, + .resume = &evergreen_resume, + .cp_commit = &r600_cp_commit, + .gpu_is_lockup = &evergreen_gpu_is_lockup, + .asic_reset = &evergreen_asic_reset, + .vga_set_state = &r600_vga_set_state, + .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, + .ring_ib_execute = &r600_ring_ib_execute, + .irq_set = &evergreen_irq_set, + .irq_process = 
&evergreen_irq_process, + .get_vblank_counter = &evergreen_get_vblank_counter, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &evergreen_cs_parse, + .copy_blit = NULL, + .copy_dma = NULL, + .copy = NULL, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .set_pcie_lanes = NULL, + .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &evergreen_bandwidth_update, + .hpd_init = &evergreen_hpd_init, + .hpd_fini = &evergreen_hpd_fini, + .hpd_sense = &evergreen_hpd_sense, + .hpd_set_polarity = &evergreen_hpd_set_polarity, + .gui_idle = &r600_gui_idle, + .pm_misc = &evergreen_pm_misc, + .pm_prepare = &evergreen_pm_prepare, + .pm_finish = &evergreen_pm_finish, + .pm_init_profile = &r600_pm_init_profile, + .pm_get_dynpm_state = &r600_pm_get_dynpm_state, +}; + +int radeon_asic_init(struct radeon_device *rdev) +{ + radeon_register_accessor_init(rdev); + switch (rdev->family) { + case CHIP_R100: + case CHIP_RV100: + case CHIP_RS100: + case CHIP_RV200: + case CHIP_RS200: + rdev->asic = &r100_asic; + break; + case CHIP_R200: + case CHIP_RV250: + case CHIP_RS300: + case CHIP_RV280: + rdev->asic = &r200_asic; + break; + case CHIP_R300: + case CHIP_R350: + case CHIP_RV350: + case CHIP_RV380: + if (rdev->flags & RADEON_IS_PCIE) + rdev->asic = &r300_asic_pcie; + else + rdev->asic = &r300_asic; + break; + case CHIP_R420: + case CHIP_R423: + case CHIP_RV410: + rdev->asic = &r420_asic; + /* handle macs */ + if (rdev->bios == NULL) { + rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock; + rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock; + rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock; + rdev->asic->set_memory_clock = NULL; + } + break; + case CHIP_RS400: + case CHIP_RS480: + rdev->asic = &rs400_asic; + break; + case CHIP_RS600: + rdev->asic = &rs600_asic; + break; + case CHIP_RS690: + case CHIP_RS740: + rdev->asic = &rs690_asic; + break; + case CHIP_RV515: + rdev->asic = &rv515_asic; + break; + case CHIP_R520: + case CHIP_RV530: + case CHIP_RV560: + case CHIP_RV570: + case CHIP_R580: + rdev->asic = &r520_asic; + break; + case CHIP_R600: + case CHIP_RV610: + case CHIP_RV630: + case CHIP_RV620: + case CHIP_RV635: + case CHIP_RV670: + rdev->asic = &r600_asic; + break; + case CHIP_RS780: + case CHIP_RS880: + rdev->asic = &rs780_asic; + break; + case CHIP_RV770: + case CHIP_RV730: + case CHIP_RV710: + case CHIP_RV740: + rdev->asic = &rv770_asic; + break; + case CHIP_CEDAR: + case CHIP_REDWOOD: + case CHIP_JUNIPER: + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + rdev->asic = &evergreen_asic; + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + if (rdev->flags & RADEON_IS_IGP) { + rdev->asic->get_memory_clock = NULL; + rdev->asic->set_memory_clock = NULL; + } + + /* set the number of crtcs */ + if (rdev->flags & RADEON_SINGLE_CRTC) + rdev->num_crtc = 1; + else { + if (ASIC_IS_DCE4(rdev)) + rdev->num_crtc = 6; + else + rdev->num_crtc = 2; + } + + return 0; +} + +/* + * Wrapper around modesetting bits. Move to radeon_clocks.c? 
+ */ +int radeon_clocks_init(struct radeon_device *rdev) +{ + int r; + + r = radeon_static_clocks_init(rdev->ddev); + if (r) { + return r; + } + DRM_INFO("Clocks initialized !\n"); + return 0; +} + +void radeon_clocks_fini(struct radeon_device *rdev) +{ +} diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index d3a157b2bcb7..c0bbaa64157a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -45,14 +45,23 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); /* * r100,rv100,rs100,rv200,rs200 */ -extern int r100_init(struct radeon_device *rdev); -extern void r100_fini(struct radeon_device *rdev); -extern int r100_suspend(struct radeon_device *rdev); -extern int r100_resume(struct radeon_device *rdev); +struct r100_mc_save { + u32 GENMO_WT; + u32 CRTC_EXT_CNTL; + u32 CRTC_GEN_CNTL; + u32 CRTC2_GEN_CNTL; + u32 CUR_OFFSET; + u32 CUR2_OFFSET; +}; +int r100_init(struct radeon_device *rdev); +void r100_fini(struct radeon_device *rdev); +int r100_suspend(struct radeon_device *rdev); +int r100_resume(struct radeon_device *rdev); uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void r100_vga_set_state(struct radeon_device *rdev, bool state); -int r100_gpu_reset(struct radeon_device *rdev); +bool r100_gpu_is_lockup(struct radeon_device *rdev); +int r100_asic_reset(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); @@ -73,7 +82,7 @@ int r100_copy_blit(struct radeon_device *rdev, int r100_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); -int r100_clear_surface_reg(struct radeon_device *rdev, int reg); +void r100_clear_surface_reg(struct radeon_device *rdev, int reg); void r100_bandwidth_update(struct radeon_device *rdev); void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r100_ring_test(struct radeon_device *rdev); @@ -82,44 +91,47 @@ void r100_hpd_fini(struct radeon_device *rdev); bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r100_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); - -static struct radeon_asic r100_asic = { - .init = &r100_init, - .fini = &r100_fini, - .suspend = &r100_suspend, - .resume = &r100_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r100_gpu_reset, - .gart_tlb_flush = &r100_pci_gart_tlb_flush, - .gart_set_page = &r100_pci_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r100_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, - .get_vblank_counter = &r100_get_vblank_counter, - .fence_ring_emit = &r100_fence_ring_emit, - .cs_parse = &r100_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = NULL, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_legacy_get_engine_clock, - .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = &radeon_legacy_get_memory_clock, - .set_memory_clock = NULL, - .get_pcie_lanes = NULL, - .set_pcie_lanes = NULL, - .set_clock_gating = &radeon_legacy_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - 
.bandwidth_update = &r100_bandwidth_update, - .hpd_init = &r100_hpd_init, - .hpd_fini = &r100_hpd_fini, - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; +int r100_debugfs_rbbm_init(struct radeon_device *rdev); +int r100_debugfs_cp_init(struct radeon_device *rdev); +void r100_cp_disable(struct radeon_device *rdev); +int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); +void r100_cp_fini(struct radeon_device *rdev); +int r100_pci_gart_init(struct radeon_device *rdev); +void r100_pci_gart_fini(struct radeon_device *rdev); +int r100_pci_gart_enable(struct radeon_device *rdev); +void r100_pci_gart_disable(struct radeon_device *rdev); +int r100_debugfs_mc_info_init(struct radeon_device *rdev); +int r100_gui_wait_for_idle(struct radeon_device *rdev); +void r100_ib_fini(struct radeon_device *rdev); +int r100_ib_init(struct radeon_device *rdev); +void r100_irq_disable(struct radeon_device *rdev); +void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); +void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); +void r100_vram_init_sizes(struct radeon_device *rdev); +void r100_wb_disable(struct radeon_device *rdev); +void r100_wb_fini(struct radeon_device *rdev); +int r100_wb_init(struct radeon_device *rdev); +int r100_cp_reset(struct radeon_device *rdev); +void r100_vga_render_disable(struct radeon_device *rdev); +int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + struct radeon_bo *robj); +int r100_cs_parse_packet0(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + const unsigned *auth, unsigned n, + radeon_packet0_check_t check); +int r100_cs_packet_parse(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx); +void r100_enable_bm(struct radeon_device *rdev); +void r100_set_common_regs(struct radeon_device *rdev); +void r100_bm_disable(struct radeon_device *rdev); +extern bool r100_gui_idle(struct radeon_device *rdev); +extern void r100_pm_misc(struct radeon_device *rdev); +extern void r100_pm_prepare(struct radeon_device *rdev); +extern void r100_pm_finish(struct radeon_device *rdev); +extern void r100_pm_init_profile(struct radeon_device *rdev); +extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); /* * r200,rv250,rs300,rv280 @@ -128,44 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_pages, - struct radeon_fence *fence); -static struct radeon_asic r200_asic = { - .init = &r100_init, - .fini = &r100_fini, - .suspend = &r100_suspend, - .resume = &r100_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r100_gpu_reset, - .gart_tlb_flush = &r100_pci_gart_tlb_flush, - .gart_set_page = &r100_pci_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r100_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, - .get_vblank_counter = &r100_get_vblank_counter, - .fence_ring_emit = &r100_fence_ring_emit, - .cs_parse = &r100_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_legacy_get_engine_clock, - .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = &radeon_legacy_get_memory_clock, - .set_memory_clock = NULL, - .set_pcie_lanes = NULL, - .set_clock_gating = &radeon_legacy_set_clock_gating, - .set_surface_reg = 
r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &r100_bandwidth_update, - .hpd_init = &r100_hpd_init, - .hpd_fini = &r100_hpd_fini, - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - + struct radeon_fence *fence); /* * r300,r350,rv350,rv380 @@ -174,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev); extern void r300_fini(struct radeon_device *rdev); extern int r300_suspend(struct radeon_device *rdev); extern int r300_resume(struct radeon_device *rdev); -extern int r300_gpu_reset(struct radeon_device *rdev); +extern bool r300_gpu_is_lockup(struct radeon_device *rdev); +extern int r300_asic_reset(struct radeon_device *rdev); extern void r300_ring_start(struct radeon_device *rdev); extern void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); @@ -186,82 +162,6 @@ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); -static struct radeon_asic r300_asic = { - .init = &r300_init, - .fini = &r300_fini, - .suspend = &r300_suspend, - .resume = &r300_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r300_gpu_reset, - .gart_tlb_flush = &r100_pci_gart_tlb_flush, - .gart_set_page = &r100_pci_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, - .get_vblank_counter = &r100_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_legacy_get_engine_clock, - .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = &radeon_legacy_get_memory_clock, - .set_memory_clock = NULL, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = &rv370_set_pcie_lanes, - .set_clock_gating = &radeon_legacy_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &r100_bandwidth_update, - .hpd_init = &r100_hpd_init, - .hpd_fini = &r100_hpd_fini, - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - - -static struct radeon_asic r300_asic_pcie = { - .init = &r300_init, - .fini = &r300_fini, - .suspend = &r300_suspend, - .resume = &r300_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r300_gpu_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, - .get_vblank_counter = &r100_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_legacy_get_engine_clock, - .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = &radeon_legacy_get_memory_clock, - .set_memory_clock = NULL, - .set_pcie_lanes = &rv370_set_pcie_lanes, - .set_clock_gating = &radeon_legacy_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - 
.clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &r100_bandwidth_update, - .hpd_init = &r100_hpd_init, - .hpd_fini = &r100_hpd_fini, - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - /* * r420,r423,rv410 */ @@ -269,44 +169,7 @@ extern int r420_init(struct radeon_device *rdev); extern void r420_fini(struct radeon_device *rdev); extern int r420_suspend(struct radeon_device *rdev); extern int r420_resume(struct radeon_device *rdev); -static struct radeon_asic r420_asic = { - .init = &r420_init, - .fini = &r420_fini, - .suspend = &r420_suspend, - .resume = &r420_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r300_gpu_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, - .get_vblank_counter = &r100_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = &rv370_set_pcie_lanes, - .set_clock_gating = &radeon_atom_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &r100_bandwidth_update, - .hpd_init = &r100_hpd_init, - .hpd_fini = &r100_hpd_fini, - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - +extern void r420_pm_init_profile(struct radeon_device *rdev); /* * rs400,rs480 @@ -319,48 +182,11 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev); int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); -static struct radeon_asic rs400_asic = { - .init = &rs400_init, - .fini = &rs400_fini, - .suspend = &rs400_suspend, - .resume = &rs400_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r300_gpu_reset, - .gart_tlb_flush = &rs400_gart_tlb_flush, - .gart_set_page = &rs400_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &r100_irq_set, - .irq_process = &r100_irq_process, - .get_vblank_counter = &r100_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_legacy_get_engine_clock, - .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = &radeon_legacy_get_memory_clock, - .set_memory_clock = NULL, - .get_pcie_lanes = NULL, - .set_pcie_lanes = NULL, - .set_clock_gating = &radeon_legacy_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &r100_bandwidth_update, - .hpd_init = &r100_hpd_init, - .hpd_fini = &r100_hpd_fini, - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = 
&r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - /* * rs600. */ +extern int rs600_asic_reset(struct radeon_device *rdev); extern int rs600_init(struct radeon_device *rdev); extern void rs600_fini(struct radeon_device *rdev); extern int rs600_suspend(struct radeon_device *rdev); @@ -378,45 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev); bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void rs600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); - -static struct radeon_asic rs600_asic = { - .init = &rs600_init, - .fini = &rs600_fini, - .suspend = &rs600_suspend, - .resume = &rs600_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r300_gpu_reset, - .gart_tlb_flush = &rs600_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &rs600_irq_set, - .irq_process = &rs600_irq_process, - .get_vblank_counter = &rs600_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = NULL, - .set_pcie_lanes = NULL, - .set_clock_gating = &radeon_atom_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &rs600_bandwidth_update, - .hpd_init = &rs600_hpd_init, - .hpd_fini = &rs600_hpd_fini, - .hpd_sense = &rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - +extern void rs600_pm_misc(struct radeon_device *rdev); +extern void rs600_pm_prepare(struct radeon_device *rdev); +extern void rs600_pm_finish(struct radeon_device *rdev); /* * rs690,rs740 @@ -428,51 +218,12 @@ int rs690_suspend(struct radeon_device *rdev); uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs690_bandwidth_update(struct radeon_device *rdev); -static struct radeon_asic rs690_asic = { - .init = &rs690_init, - .fini = &rs690_fini, - .suspend = &rs690_suspend, - .resume = &rs690_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &r300_gpu_reset, - .gart_tlb_flush = &rs400_gart_tlb_flush, - .gart_set_page = &rs400_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &r300_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &rs600_irq_set, - .irq_process = &rs600_irq_process, - .get_vblank_counter = &rs600_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r200_copy_dma, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = NULL, - .set_pcie_lanes = NULL, - .set_clock_gating = &radeon_atom_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &rs690_bandwidth_update, - .hpd_init = &rs600_hpd_init, - .hpd_fini = 
&rs600_hpd_fini, - .hpd_sense = &rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - /* * rv515 */ int rv515_init(struct radeon_device *rdev); void rv515_fini(struct radeon_device *rdev); -int rv515_gpu_reset(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rv515_ring_start(struct radeon_device *rdev); @@ -481,87 +232,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rv515_bandwidth_update(struct radeon_device *rdev); int rv515_resume(struct radeon_device *rdev); int rv515_suspend(struct radeon_device *rdev); -static struct radeon_asic rv515_asic = { - .init = &rv515_init, - .fini = &rv515_fini, - .suspend = &rv515_suspend, - .resume = &rv515_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &rv515_gpu_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &rv515_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &rs600_irq_set, - .irq_process = &rs600_irq_process, - .get_vblank_counter = &rs600_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = &rv370_set_pcie_lanes, - .set_clock_gating = &radeon_atom_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &rv515_bandwidth_update, - .hpd_init = &rs600_hpd_init, - .hpd_fini = &rs600_hpd_fini, - .hpd_sense = &rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; - /* * r520,rv530,rv560,rv570,r580 */ int r520_init(struct radeon_device *rdev); int r520_resume(struct radeon_device *rdev); -static struct radeon_asic r520_asic = { - .init = &r520_init, - .fini = &rv515_fini, - .suspend = &rv515_suspend, - .resume = &r520_resume, - .vga_set_state = &r100_vga_set_state, - .gpu_reset = &rv515_gpu_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, - .ring_start = &rv515_ring_start, - .ring_test = &r100_ring_test, - .ring_ib_execute = &r100_ring_ib_execute, - .irq_set = &rs600_irq_set, - .irq_process = &rs600_irq_process, - .get_vblank_counter = &rs600_get_vblank_counter, - .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r300_cs_parse, - .copy_blit = &r100_copy_blit, - .copy_dma = &r200_copy_dma, - .copy = &r100_copy_blit, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = &rv370_set_pcie_lanes, - .set_clock_gating = &radeon_atom_set_clock_gating, - .set_surface_reg = r100_set_surface_reg, - .clear_surface_reg = r100_clear_surface_reg, - .bandwidth_update = &rv515_bandwidth_update, - .hpd_init = &rs600_hpd_init, - .hpd_fini = &rs600_hpd_fini, - .hpd_sense = 
&rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -}; /* * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 @@ -587,11 +263,12 @@ int r600_copy_dma(struct radeon_device *rdev, struct radeon_fence *fence); int r600_irq_process(struct radeon_device *rdev); int r600_irq_set(struct radeon_device *rdev); -int r600_gpu_reset(struct radeon_device *rdev); +bool r600_gpu_is_lockup(struct radeon_device *rdev); +int r600_asic_reset(struct radeon_device *rdev); int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); -int r600_clear_surface_reg(struct radeon_device *rdev, int reg); +void r600_clear_surface_reg(struct radeon_device *rdev, int reg); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev); int r600_copy_blit(struct radeon_device *rdev, @@ -603,43 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); - -static struct radeon_asic r600_asic = { - .init = &r600_init, - .fini = &r600_fini, - .suspend = &r600_suspend, - .resume = &r600_resume, - .cp_commit = &r600_cp_commit, - .vga_set_state = &r600_vga_set_state, - .gpu_reset = &r600_gpu_reset, - .gart_tlb_flush = &r600_pcie_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, - .ring_test = &r600_ring_test, - .ring_ib_execute = &r600_ring_ib_execute, - .irq_set = &r600_irq_set, - .irq_process = &r600_irq_process, - .get_vblank_counter = &rs600_get_vblank_counter, - .fence_ring_emit = &r600_fence_ring_emit, - .cs_parse = &r600_cs_parse, - .copy_blit = &r600_copy_blit, - .copy_dma = &r600_copy_blit, - .copy = &r600_copy_blit, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = NULL, - .set_clock_gating = NULL, - .set_surface_reg = r600_set_surface_reg, - .clear_surface_reg = r600_clear_surface_reg, - .bandwidth_update = &rv515_bandwidth_update, - .hpd_init = &r600_hpd_init, - .hpd_fini = &r600_hpd_fini, - .hpd_sense = &r600_hpd_sense, - .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, -}; +extern bool r600_gui_idle(struct radeon_device *rdev); +extern void r600_pm_misc(struct radeon_device *rdev); +extern void r600_pm_init_profile(struct radeon_device *rdev); +extern void rs780_pm_init_profile(struct radeon_device *rdev); +extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); /* * rv770,rv730,rv710,rv740 @@ -648,93 +293,30 @@ int rv770_init(struct radeon_device *rdev); void rv770_fini(struct radeon_device *rdev); int rv770_suspend(struct radeon_device *rdev); int rv770_resume(struct radeon_device *rdev); -int rv770_gpu_reset(struct radeon_device *rdev); - -static struct radeon_asic rv770_asic = { - .init = &rv770_init, - .fini = &rv770_fini, - .suspend = &rv770_suspend, - .resume = &rv770_resume, - .cp_commit = &r600_cp_commit, - .gpu_reset = &rv770_gpu_reset, - .vga_set_state = &r600_vga_set_state, - .gart_tlb_flush = &r600_pcie_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, - .ring_test = &r600_ring_test, - .ring_ib_execute = &r600_ring_ib_execute, - .irq_set = &r600_irq_set, - 
.irq_process = &r600_irq_process, - .get_vblank_counter = &rs600_get_vblank_counter, - .fence_ring_emit = &r600_fence_ring_emit, - .cs_parse = &r600_cs_parse, - .copy_blit = &r600_copy_blit, - .copy_dma = &r600_copy_blit, - .copy = &r600_copy_blit, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = NULL, - .set_clock_gating = &radeon_atom_set_clock_gating, - .set_surface_reg = r600_set_surface_reg, - .clear_surface_reg = r600_clear_surface_reg, - .bandwidth_update = &rv515_bandwidth_update, - .hpd_init = &r600_hpd_init, - .hpd_fini = &r600_hpd_fini, - .hpd_sense = &r600_hpd_sense, - .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, -}; +extern void rv770_pm_misc(struct radeon_device *rdev); /* * evergreen */ +void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); int evergreen_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); int evergreen_suspend(struct radeon_device *rdev); int evergreen_resume(struct radeon_device *rdev); -int evergreen_gpu_reset(struct radeon_device *rdev); +bool evergreen_gpu_is_lockup(struct radeon_device *rdev); +int evergreen_asic_reset(struct radeon_device *rdev); void evergreen_bandwidth_update(struct radeon_device *rdev); void evergreen_hpd_init(struct radeon_device *rdev); void evergreen_hpd_fini(struct radeon_device *rdev); bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void evergreen_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); - -static struct radeon_asic evergreen_asic = { - .init = &evergreen_init, - .fini = &evergreen_fini, - .suspend = &evergreen_suspend, - .resume = &evergreen_resume, - .cp_commit = NULL, - .gpu_reset = &evergreen_gpu_reset, - .vga_set_state = &r600_vga_set_state, - .gart_tlb_flush = &r600_pcie_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, - .ring_test = NULL, - .ring_ib_execute = NULL, - .irq_set = NULL, - .irq_process = NULL, - .get_vblank_counter = NULL, - .fence_ring_emit = NULL, - .cs_parse = NULL, - .copy_blit = NULL, - .copy_dma = NULL, - .copy = NULL, - .get_engine_clock = &radeon_atom_get_engine_clock, - .set_engine_clock = &radeon_atom_set_engine_clock, - .get_memory_clock = &radeon_atom_get_memory_clock, - .set_memory_clock = &radeon_atom_set_memory_clock, - .set_pcie_lanes = NULL, - .set_clock_gating = NULL, - .set_surface_reg = r600_set_surface_reg, - .clear_surface_reg = r600_clear_surface_reg, - .bandwidth_update = &evergreen_bandwidth_update, - .hpd_init = &evergreen_hpd_init, - .hpd_fini = &evergreen_hpd_fini, - .hpd_sense = &evergreen_hpd_sense, - .hpd_set_polarity = &evergreen_hpd_set_polarity, -}; +u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); +int evergreen_irq_set(struct radeon_device *rdev); +int evergreen_irq_process(struct radeon_device *rdev); +extern int evergreen_cs_parse(struct radeon_cs_parser *p); +extern void evergreen_pm_misc(struct radeon_device *rdev); +extern void evergreen_pm_prepare(struct radeon_device *rdev); +extern void evergreen_pm_finish(struct radeon_device *rdev); #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 93783b15c81d..10673ae59cfa 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ 
-69,52 +69,54 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; - uint16_t data_offset; - int i; + uint16_t data_offset, size; + int i, num_indices; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); i2c.valid = false; - atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); - - i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); - - - for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { - gpio = &i2c_info->asGPIO_Info[i]; - - if (gpio->sucI2cId.ucAccess == id) { - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); - i2c.en_data_mask = (1 << gpio->ucDataEnShift); - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); - - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) - i2c.hw_capable = true; - else - i2c.hw_capable = false; - - if (gpio->sucI2cId.ucAccess == 0xa0) - i2c.mm_i2c = true; - else - i2c.mm_i2c = false; - - i2c.i2c_id = gpio->sucI2cId.ucAccess; - - i2c.valid = true; - break; + if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { + i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); + + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_GPIO_I2C_ASSIGMENT); + + for (i = 0; i < num_indices; i++) { + gpio = &i2c_info->asGPIO_Info[i]; + + if (gpio->sucI2cId.ucAccess == id) { + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; + i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; + i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; + i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; + i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; + i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; + i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; + i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; + i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); + i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); + i2c.en_clk_mask = (1 << gpio->ucClkEnShift); + i2c.en_data_mask = (1 << gpio->ucDataEnShift); + i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); + i2c.y_data_mask = (1 << gpio->ucDataY_Shift); + i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); + i2c.a_data_mask = (1 << gpio->ucDataA_Shift); + + if (gpio->sucI2cId.sbfAccess.bfHW_Capable) + i2c.hw_capable = true; + else + i2c.hw_capable = false; + + if (gpio->sucI2cId.ucAccess == 0xa0) + i2c.mm_i2c = true; + else + i2c.mm_i2c = false; + + i2c.i2c_id = gpio->sucI2cId.ucAccess; + + i2c.valid = true; + break; + } } } @@ -135,20 +137,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); gpio.valid = 
false; - atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset); - - gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); + if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { + gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); - num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_GPIO_PIN_ASSIGNMENT); - for (i = 0; i < num_indices; i++) { - pin = &gpio_info->asGPIO_Pin[i]; - if (id == pin->ucGPIO_ID) { - gpio.id = pin->ucGPIO_ID; - gpio.reg = pin->usGpioPin_AIndex * 4; - gpio.mask = (1 << pin->ucGpioPinBitShift); - gpio.valid = true; - break; + for (i = 0; i < num_indices; i++) { + pin = &gpio_info->asGPIO_Pin[i]; + if (id == pin->ucGPIO_ID) { + gpio.id = pin->ucGPIO_ID; + gpio.reg = pin->usGpioPin_AIndex * 4; + gpio.mask = (1 << pin->ucGpioPinBitShift); + gpio.valid = true; + break; + } } } @@ -264,6 +267,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) return false; + if (supported_device == ATOM_DEVICE_CRT2_SUPPORT) + *line_mux = 0x90; } /* ASUS HD 3600 XT board lists the DVI port as HDMI */ @@ -275,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } + /* ASUS HD 3600 board lists the DVI port as HDMI */ + if ((dev->pdev->device == 0x9598) && + (dev->pdev->subsystem_vendor == 0x1043) && + (dev->pdev->subsystem_device == 0x01e4)) { + if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { + *connector_type = DRM_MODE_CONNECTOR_DVII; + } + } + /* ASUS HD 3450 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x95C5) && (dev->pdev->subsystem_vendor == 0x1043) && @@ -395,9 +409,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) struct radeon_gpio_rec gpio; struct radeon_hpd hpd; - atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); - - if (data_offset == 0) + if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) return false; if (crev < 2) @@ -449,37 +461,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); - atom_parse_data_header(ctx, index, &size, &frev, - &crev, &igp_offset); - - if (crev >= 2) { - igp_obj = - (ATOM_INTEGRATED_SYSTEM_INFO_V2 - *) (ctx->bios + igp_offset); - - if (igp_obj) { - uint32_t slot_config, ct; - - if (con_obj_num == 1) - slot_config = - igp_obj-> - ulDDISlot1Config; - else - slot_config = - igp_obj-> - ulDDISlot2Config; - - ct = (slot_config >> 16) & 0xff; - connector_type = - object_connector_convert - [ct]; - connector_object_id = ct; - igp_lane_info = - slot_config & 0xffff; + if (atom_parse_data_header(ctx, index, &size, &frev, + &crev, &igp_offset)) { + + if (crev >= 2) { + igp_obj = + (ATOM_INTEGRATED_SYSTEM_INFO_V2 + *) (ctx->bios + igp_offset); + + if (igp_obj) { + uint32_t slot_config, ct; + + if (con_obj_num == 1) + slot_config = + igp_obj-> + ulDDISlot1Config; + else + slot_config = + igp_obj-> + ulDDISlot2Config; + + ct = (slot_config >> 16) & 0xff; + connector_type = + object_connector_convert + [ct]; + connector_object_id = ct; + igp_lane_info = + slot_config & 0xffff; + } else + continue; } else continue; - } else - continue; + } else { + igp_lane_info = 0; + connector_type = + object_connector_convert[con_obj_id]; + connector_object_id = con_obj_id; + } } else { 
igp_lane_info = 0; connector_type = @@ -521,6 +539,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) } /* look up gpio for ddc, hpd */ + ddc_bus.valid = false; + hpd.hpd = RADEON_HPD_NONE; if ((le16_to_cpu(path->usDeviceTag) & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { for (j = 0; j < con_obj->ucNumberOfObjects; j++) { @@ -538,7 +558,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) ATOM_I2C_RECORD *i2c_record; ATOM_HPD_INT_RECORD *hpd_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; - hpd.hpd = RADEON_HPD_NONE; while (record->ucRecordType > 0 && record-> @@ -576,13 +595,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) break; } } - } else { - hpd.hpd = RADEON_HPD_NONE; - ddc_bus.valid = false; } /* needed for aux chan transactions */ - ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0; + ddc_bus.hpd = hpd.hpd; conn_id = le16_to_cpu(path->usConnObjectId); @@ -627,20 +643,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev, uint8_t frev, crev; ATOM_XTMDS_INFO *xtmds; - atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); - xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); + if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) { + xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); - if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { - if (connector_type == DRM_MODE_CONNECTOR_DVII) - return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; - else - return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; - } else { - if (connector_type == DRM_MODE_CONNECTOR_DVII) - return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; - else - return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; - } + if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { + if (connector_type == DRM_MODE_CONNECTOR_DVII) + return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; + else + return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; + } else { + if (connector_type == DRM_MODE_CONNECTOR_DVII) + return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; + else + return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; + } + } else + return supported_devices_connector_object_id_convert + [connector_type]; } else { return supported_devices_connector_object_id_convert [connector_type]; @@ -670,9 +689,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct uint8_t dac; union atom_supported_devices *supported_devices; int i, j, max_device; - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; + struct bios_connector *bios_connectors; + size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; - atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); + bios_connectors = kzalloc(bc_size, GFP_KERNEL); + if (!bios_connectors) + return false; + + if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, + &data_offset)) { + kfree(bios_connectors); + return false; + } supported_devices = (union atom_supported_devices *)(ctx->bios + data_offset); @@ -840,6 +868,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct radeon_link_encoder_connector(dev); + kfree(bios_connectors); return true; } @@ -865,14 +894,11 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) struct radeon_pll *mpll = &rdev->clock.mpll; uint16_t data_offset; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, - &crev, &data_offset); - - firmware_info = - (union firmware_info *)(mode_info->atom_context->bios + - data_offset); - - if 
(firmware_info) { + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + data_offset); /* pixel clocks */ p1pll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); @@ -887,6 +913,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) p1pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); + if (crev >= 4) { + p1pll->lcd_pll_out_min = + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; + if (p1pll->lcd_pll_out_min == 0) + p1pll->lcd_pll_out_min = p1pll->pll_out_min; + p1pll->lcd_pll_out_max = + le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; + if (p1pll->lcd_pll_out_max == 0) + p1pll->lcd_pll_out_max = p1pll->pll_out_max; + } else { + p1pll->lcd_pll_out_min = p1pll->pll_out_min; + p1pll->lcd_pll_out_max = p1pll->pll_out_max; + } + if (p1pll->pll_out_min == 0) { if (ASIC_IS_AVIVO(rdev)) p1pll->pll_out_min = 64800; @@ -992,17 +1032,21 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) u8 frev, crev; u16 data_offset; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, - &crev, &data_offset); - - igp_info = (union igp_info *)(mode_info->atom_context->bios + + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *)(mode_info->atom_context->bios + data_offset); - - if (igp_info) { switch (crev) { case 1: - if (igp_info->info.ucMemoryType & 0xf0) - return true; + /* AMD IGPS */ + if ((rdev->family == CHIP_RS690) || + (rdev->family == CHIP_RS740)) { + if (igp_info->info.ulBootUpMemoryClock) + return true; + } else { + if (igp_info->info.ucMemoryType & 0xf0) + return true; + } break; case 2: if (igp_info->info_2.ucMemoryType & 0x0f) @@ -1029,14 +1073,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, uint16_t maxfreq; int i; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, - &crev, &data_offset); - - tmds_info = - (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + - data_offset); + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + tmds_info = + (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + + data_offset); - if (tmds_info) { maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); for (i = 0; i < 4; i++) { tmds->tmds_pll[i].freq = @@ -1085,13 +1127,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct if (id > ATOM_MAX_SS_ENTRY) return NULL; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, - &crev, &data_offset); + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + ss_info = + (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); - ss_info = - (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); - - if (ss_info) { ss = kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); @@ -1114,30 +1154,6 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct return ss; } -static void radeon_atom_apply_lvds_quirks(struct drm_device *dev, - struct radeon_encoder_atom_dig *lvds) -{ - - /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */ - if ((dev->pdev->device == 0x95c4) && - (dev->pdev->subsystem_vendor == 0x1179) && - (dev->pdev->subsystem_device == 0xff50)) { - if ((lvds->native_mode.hdisplay == 1280) && - 
(lvds->native_mode.vdisplay == 800)) - lvds->pll_algo = PLL_ALGO_LEGACY; - } - - /* Dell Studio 15 laptop panel doesn't like new pll divider algo */ - if ((dev->pdev->device == 0x95c4) && - (dev->pdev->subsystem_vendor == 0x1028) && - (dev->pdev->subsystem_device == 0x029f)) { - if ((lvds->native_mode.hdisplay == 1280) && - (lvds->native_mode.vdisplay == 800)) - lvds->pll_algo = PLL_ALGO_LEGACY; - } - -} - union lvds_info { struct _ATOM_LVDS_INFO info; struct _ATOM_LVDS_INFO_V12 info_12; @@ -1156,13 +1172,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct uint8_t frev, crev; struct radeon_encoder_atom_dig *lvds = NULL; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, - &crev, &data_offset); - - lvds_info = - (union lvds_info *)(mode_info->atom_context->bios + data_offset); - - if (lvds_info) { + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + lvds_info = + (union lvds_info *)(mode_info->atom_context->bios + data_offset); lvds = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); @@ -1184,7 +1197,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct lvds->native_mode.vtotal = lvds->native_mode.vdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + - le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); lvds->panel_pwr_delay = @@ -1220,9 +1233,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct lvds->pll_algo = PLL_ALGO_LEGACY; } - /* LVDS quirks */ - radeon_atom_apply_lvds_quirks(dev, lvds); - encoder->native_mode = lvds->native_mode; } return lvds; @@ -1241,11 +1251,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) uint8_t bg, dac; struct radeon_encoder_primary_dac *p_dac = NULL; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + dac_info = (struct _COMPASSIONATE_DATA *) + (mode_info->atom_context->bios + data_offset); - dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); - - if (dac_info) { p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); if (!p_dac) @@ -1270,12 +1280,14 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, u8 frev, crev; u16 data_offset, misc; - atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); + if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL, + &frev, &crev, &data_offset)) + return false; switch (crev) { case 1: tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); - if (index > MAX_SUPPORTED_TV_TIMING) + if (index >= MAX_SUPPORTED_TV_TIMING) return false; mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); @@ -1313,7 +1325,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, break; case 2: tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset); - if (index > MAX_SUPPORTED_TV_TIMING_V1_2) + if (index >= MAX_SUPPORTED_TV_TIMING_V1_2) return false; dtd_timings = &tv_info_v1_2->aModeTimings[index]; @@ -1362,47 +1374,50 @@ radeon_atombios_get_tv_info(struct 
radeon_device *rdev) struct _ATOM_ANALOG_TV_INFO *tv_info; enum radeon_tv_std tv_std = TV_STD_NTSC; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { - tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); + tv_info = (struct _ATOM_ANALOG_TV_INFO *) + (mode_info->atom_context->bios + data_offset); - switch (tv_info->ucTV_BootUpDefaultStandard) { - case ATOM_TV_NTSC: - tv_std = TV_STD_NTSC; - DRM_INFO("Default TV standard: NTSC\n"); - break; - case ATOM_TV_NTSCJ: - tv_std = TV_STD_NTSC_J; - DRM_INFO("Default TV standard: NTSC-J\n"); - break; - case ATOM_TV_PAL: - tv_std = TV_STD_PAL; - DRM_INFO("Default TV standard: PAL\n"); - break; - case ATOM_TV_PALM: - tv_std = TV_STD_PAL_M; - DRM_INFO("Default TV standard: PAL-M\n"); - break; - case ATOM_TV_PALN: - tv_std = TV_STD_PAL_N; - DRM_INFO("Default TV standard: PAL-N\n"); - break; - case ATOM_TV_PALCN: - tv_std = TV_STD_PAL_CN; - DRM_INFO("Default TV standard: PAL-CN\n"); - break; - case ATOM_TV_PAL60: - tv_std = TV_STD_PAL_60; - DRM_INFO("Default TV standard: PAL-60\n"); - break; - case ATOM_TV_SECAM: - tv_std = TV_STD_SECAM; - DRM_INFO("Default TV standard: SECAM\n"); - break; - default: - tv_std = TV_STD_NTSC; - DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); - break; + switch (tv_info->ucTV_BootUpDefaultStandard) { + case ATOM_TV_NTSC: + tv_std = TV_STD_NTSC; + DRM_INFO("Default TV standard: NTSC\n"); + break; + case ATOM_TV_NTSCJ: + tv_std = TV_STD_NTSC_J; + DRM_INFO("Default TV standard: NTSC-J\n"); + break; + case ATOM_TV_PAL: + tv_std = TV_STD_PAL; + DRM_INFO("Default TV standard: PAL\n"); + break; + case ATOM_TV_PALM: + tv_std = TV_STD_PAL_M; + DRM_INFO("Default TV standard: PAL-M\n"); + break; + case ATOM_TV_PALN: + tv_std = TV_STD_PAL_N; + DRM_INFO("Default TV standard: PAL-N\n"); + break; + case ATOM_TV_PALCN: + tv_std = TV_STD_PAL_CN; + DRM_INFO("Default TV standard: PAL-CN\n"); + break; + case ATOM_TV_PAL60: + tv_std = TV_STD_PAL_60; + DRM_INFO("Default TV standard: PAL-60\n"); + break; + case ATOM_TV_SECAM: + tv_std = TV_STD_SECAM; + DRM_INFO("Default TV standard: SECAM\n"); + break; + default: + tv_std = TV_STD_NTSC; + DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); + break; + } } return tv_std; } @@ -1420,11 +1435,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) uint8_t bg, dac; struct radeon_encoder_tv_dac *tv_dac = NULL; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { - dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); + dac_info = (struct _COMPASSIONATE_DATA *) + (mode_info->atom_context->bios + data_offset); - if (dac_info) { tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); if (!tv_dac) @@ -1447,6 +1463,34 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) return tv_dac; } +static const char *thermal_controller_names[] = { + "NONE", + "lm63", + "adm1032", + "adm1030", + "max6649", + "lm64", + "f75375", + "asc7xxx", +}; + +static const char *pp_lib_thermal_controller_names[] = { + "NONE", + "lm63", + "adm1032", + "adm1030", + "max6649", + "lm64", + "f75375", + "RV6xx", + "RV770", + "adt7473", + "External GPIO", + "Evergreen", + "adt7473 with internal", + +}; + union power_info { struct 
_ATOM_POWERPLAY_INFO info; struct _ATOM_POWERPLAY_INFO_V2 info_2; @@ -1466,18 +1510,34 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) struct _ATOM_PPLIB_STATE *power_state; int num_modes = 0, i, j; int state_index = 0, mode_index = 0; + struct radeon_i2c_bus_rec i2c_bus; - atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); - - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + rdev->pm.default_power_state_index = -1; - rdev->pm.default_power_state = NULL; - - if (power_info) { + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); if (frev < 4) { + /* add the i2c bus for thermal/fan chip */ + if (power_info->info.ucOverdriveThermalController > 0) { + DRM_INFO("Possible %s thermal controller at 0x%02x\n", + thermal_controller_names[power_info->info.ucOverdriveThermalController], + power_info->info.ucOverdriveControllerAddress >> 1); + i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); + rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); + if (rdev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = thermal_controller_names[power_info->info. + ucOverdriveThermalController]; + info.addr = power_info->info.ucOverdriveControllerAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + } + } num_modes = power_info->info.ucNumOfPowerModeEntries; if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + /* last mode is usually default, array is low to high */ for (i = 0; i < num_modes; i++) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; switch (frev) { @@ -1491,16 +1551,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) continue; - /* skip overclock modes for now */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk > - rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || - (rdev->pm.power_state[state_index].clock_info[0].sclk > - rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) - continue; - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + rdev->pm.power_state[state_index].pcie_lanes = power_info->info.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = @@ -1518,6 +1573,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; } + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + rdev->pm.power_state[state_index].misc = misc; /* order matters! 
*/ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) rdev->pm.power_state[state_index].type = @@ -1531,15 +1588,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + } if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + } else if (state_index == 0) { + rdev->pm.power_state[state_index].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; } state_index++; break; @@ -1553,17 +1618,12 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) continue; - /* skip overclock modes for now */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk > - rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || - (rdev->pm.power_state[state_index].clock_info[0].sclk > - rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) - continue; - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + rdev->pm.power_state[state_index].pcie_lanes = power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = @@ -1581,6 +1641,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; } + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + rdev->pm.power_state[state_index].misc = misc; + rdev->pm.power_state[state_index].misc2 = misc2; /* order matters! 
*/ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) rdev->pm.power_state[state_index].type = @@ -1594,18 +1657,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + } if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; + if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + } else if (state_index == 0) { + rdev->pm.power_state[state_index].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; } state_index++; break; @@ -1619,17 +1693,12 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) continue; - /* skip overclock modes for now */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk > - rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || - (rdev->pm.power_state[state_index].clock_info[0].sclk > - rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) - continue; - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + rdev->pm.power_state[state_index].pcie_lanes = power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = @@ -1653,6 +1722,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; } } + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + rdev->pm.power_state[state_index].misc = misc; + rdev->pm.power_state[state_index].misc2 = misc2; /* order matters! 
*/ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) rdev->pm.power_state[state_index].type = @@ -1666,24 +1738,89 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + } if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; + } else if (state_index == 0) { + rdev->pm.power_state[state_index].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; } state_index++; break; } } - } else if (frev == 4) { + /* last mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[state_index - 1].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = state_index - 1; + rdev->pm.power_state[state_index - 1].default_clock_mode = + &rdev->pm.power_state[state_index - 1].clock_info[0]; + rdev->pm.power_state[state_index - 1].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + rdev->pm.power_state[state_index - 1].misc = 0; + rdev->pm.power_state[state_index - 1].misc2 = 0; + } + } else { + int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + uint8_t fw_frev, fw_crev; + uint16_t fw_data_offset, vddc = 0; + union firmware_info *firmware_info; + ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; + + if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, + &fw_frev, &fw_crev, &fw_data_offset)) { + firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + fw_data_offset); + vddc = firmware_info->info_14.usBootUpVDDCVoltage; + } + + /* add the i2c bus for thermal/fan chip */ + /* no support for internal controller yet */ + if (controller->ucType > 0) { + if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || + (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || + (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + } else if ((controller->ucType == + ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || + (controller->ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { + DRM_INFO("Special thermal controller config\n"); + } else { + DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + pp_lib_thermal_controller_names[controller->ucType], + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ?
"without" : "with"); + i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); + rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); + if (rdev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = pp_lib_thermal_controller_names[controller->ucType]; + info.addr = controller->ucI2cAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + } + + } + } + /* first mode is usually default, followed by low to high */ for (i = 0; i < power_info->info_4.ucNumStates; i++) { mode_index = 0; power_state = (struct _ATOM_PPLIB_STATE *) @@ -1712,14 +1849,31 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) /* skip invalid modes */ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) continue; - /* skip overclock modes for now */ - if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > - rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN) + /* voltage works differently on IGPs */ + mode_index++; + } else if (ASIC_IS_DCE4(rdev)) { + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = + (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) + (mode_info->atom_context->bios + + data_offset + + le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + + (power_state->ucClockStateIndices[j] * + power_info->info_4.ucClockInfoSize)); + sclk = le16_to_cpu(clock_info->usEngineClockLow); + sclk |= clock_info->ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->usMemoryClockLow); + mclk |= clock_info->ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) continue; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = VOLTAGE_SW; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = clock_info->usVDDC; + /* XXX usVDDCI */ mode_index++; } else { struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = @@ -1739,12 +1893,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) continue; - /* skip overclock modes for now */ - if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk > - rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || - (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > - rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) - continue; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = VOLTAGE_SW; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = @@ -1756,7 +1904,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) if (mode_index) { misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); misc2 = le16_to_cpu(non_clock_info->usClassification); - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + rdev->pm.power_state[state_index].misc = misc; + rdev->pm.power_state[state_index].misc2 = misc2; + rdev->pm.power_state[state_index].pcie_lanes = ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { @@ -1773,22 +1923,46 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) 
POWER_STATE_TYPE_PERFORMANCE; break; } + rdev->pm.power_state[state_index].flags = 0; + if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) + rdev->pm.power_state[state_index].flags |= + RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; + /* patch the table values with the default sclk/mclk from firmware info */ + for (j = 0; j < mode_index; j++) { + rdev->pm.power_state[state_index].clock_info[j].mclk = + rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[j].sclk = + rdev->clock.default_sclk; + if (vddc) + rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = + vddc; + } } state_index++; } } + /* if multiple clock modes, mark the lowest as no display */ + for (i = 0; i < state_index; i++) { + if (rdev->pm.power_state[i].num_clock_modes > 1) + rdev->pm.power_state[i].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; + } + /* first mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[0].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = 0; + rdev->pm.power_state[0].default_clock_mode = + &rdev->pm.power_state[0].clock_info[0]; + } } } else { - /* XXX figure out some good default low power mode for cards w/out power tables */ - } - - if (rdev->pm.default_power_state == NULL) { /* add the default mode */ rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; @@ -1798,18 +1972,17 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - if (rdev->asic->get_pcie_lanes) - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); - else - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; - rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.power_state[state_index].pcie_lanes = 16; + rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].flags = 0; state_index++; } + rdev->pm.num_power_states = state_index; - rdev->pm.current_power_state = rdev->pm.default_power_state; - rdev->pm.current_clock_mode = - rdev->pm.default_power_state->default_clock_mode; + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.current_clock_mode_index = 0; + rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; } void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) @@ -1865,6 +2038,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); }
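The recurring change across the radeon_atombios.c hunks above is defensive table parsing: each caller now checks the boolean return of atom_parse_data_header() before dereferencing ctx->bios + data_offset, where the old code parsed unconditionally and trusted whatever offset came back. A minimal standalone sketch of that guard pattern, using simplified stand-in types rather than the real driver structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct atom_context {
        const uint8_t *bios;    /* stand-in for the mapped AtomBIOS image */
    };

    /* Stand-in for atom_parse_data_header(): reports whether the requested
     * data table exists and, if so, where it lives in the BIOS image. */
    static bool parse_data_header(const struct atom_context *ctx, int index,
                                  uint16_t *size, uint16_t *data_offset)
    {
        (void)ctx;
        (void)index;
        *size = 0;
        *data_offset = 0;
        return false;           /* pretend the table is absent */
    }

    static void lookup_table(const struct atom_context *ctx, int index)
    {
        uint16_t size, data_offset;

        /* patched style: only touch bios + data_offset inside the guard */
        if (parse_data_header(ctx, index, &size, &data_offset)) {
            const uint8_t *table = ctx->bios + data_offset;
            printf("table %d: %u bytes at %p\n", index, (unsigned)size,
                   (const void *)table);
        } else {
            printf("table %d absent, skipping\n", index);
        }
    }

    int main(void)
    {
        static const uint8_t bios[16];
        struct atom_context ctx = { bios };

        lookup_table(&ctx, 0);
        return 0;
    }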
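Further down, radeon_combios_check_hardcoded_edid() stops allocating for a fixed worst-case number of EDID extensions and instead sizes the copy from the data itself: byte 0x7e of the 128-byte base block holds the extension count, so the full image occupies EDID_LENGTH * (raw[0x7e] + 1) bytes. A toy version of that computation, with EDID_BLOCK_LEN standing in for the driver's EDID_LENGTH:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EDID_BLOCK_LEN 128  /* stand-in for the driver's EDID_LENGTH */

    /* Total EDID image size: the base block plus however many extension
     * blocks the base block advertises at offset 0x7e. */
    static size_t edid_total_size(const uint8_t *raw)
    {
        return (size_t)EDID_BLOCK_LEN * (raw[0x7e] + 1u);
    }

    int main(void)
    {
        uint8_t base[EDID_BLOCK_LEN] = { 0 };

        base[0x7e] = 1; /* e.g. one CEA extension block */
        printf("%zu bytes\n", edid_total_size(base)); /* prints 256 */
        return 0;
    }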
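The radeon_atom_set_voltage() addition just below follows the usual AtomBIOS command idiom: one union overlays every revision of the parameter block, atom_parse_cmd_header() reports which revision the BIOS implements, and a switch on crev fills only that layout. A compact sketch of the dispatch, with made-up structure names in place of the real _SET_VOLTAGE_PARAMETERS layouts:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for _SET_VOLTAGE_PARAMETERS and ..._V2 */
    struct set_voltage_v1 { uint8_t type, mode, volt_index; };
    struct set_voltage_v2 { uint8_t type, mode; uint16_t level; };

    union set_voltage_args {
        struct set_voltage_v1 v1;
        struct set_voltage_v2 v2;
    };

    /* Fill only the member matching the revision the BIOS reports. */
    static int build_set_voltage(uint8_t crev, uint16_t level,
                                 union set_voltage_args *args)
    {
        switch (crev) {
        case 1:
            args->v1.volt_index = (uint8_t)level; /* v1 takes an index */
            return 0;
        case 2:
            args->v2.level = level; /* v2 takes the level itself */
            return 0;
        default:
            fprintf(stderr, "unknown table revision %u\n", (unsigned)crev);
            return -1;
        }
    }

    int main(void)
    {
        union set_voltage_args args = { .v2 = { 0, 0, 0 } };

        if (build_set_voltage(2, 1100, &args) == 0)
            printf("v2 level = %u\n", (unsigned)args.v2.level);
        return 0;
    }

Keeping a single union sized for the largest revision lets the same buffer be handed to the table executor no matter which member was filled, which is why the driver passes &args to atom_execute_table() unconditionally.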
case 1: + args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; + args.v1.ucVoltageIndex = volt_index; + break; + case 2: + args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; + args.v2.usVoltageLevel = cpu_to_le16(level); + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + + + void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 3f557c4151e0..ed5dfe58f29c 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -7,6 +7,7 @@ * ATPX support for both Intel/ATI */ #include <linux/vga_switcheroo.h> +#include <linux/slab.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <linux/pci.h> diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 557240460526..2c9213739999 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -31,6 +31,7 @@ #include "atom.h" #include <linux/vga_switcheroo.h> +#include <linux/slab.h> /* * BIOS. */ @@ -47,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) resource_size_t vram_base; resource_size_t size = 256 * 1024; /* ??? */ + if (!(rdev->flags & RADEON_IS_IGP)) + if (!radeon_card_posted(rdev)) + return false; + rdev->bios = NULL; vram_base = drm_get_resource_start(rdev->ddev, 0); bios = ioremap(vram_base, size); @@ -84,12 +89,11 @@ static bool radeon_read_bios(struct radeon_device *rdev) pci_unmap_rom(rdev->pdev, bios); return false; } - rdev->bios = kmalloc(size, GFP_KERNEL); + rdev->bios = kmemdup(bios, size, GFP_KERNEL); if (rdev->bios == NULL) { pci_unmap_rom(rdev->pdev, bios); return false; } - memcpy(rdev->bios, bios, size); pci_unmap_rom(rdev->pdev, bios); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e9ea38ece375..2417d7b06fdb 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) { int edid_info; struct edid *edid; + unsigned char *raw; edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); if (!edid_info) return false; - edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), - GFP_KERNEL); + raw = rdev->bios + edid_info; + edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); if (edid == NULL) return false; - memcpy((unsigned char *)edid, - (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH); + memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); if (!drm_edid_is_valid(edid)) { kfree(edid); @@ -531,10 +531,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde case CHIP_RS300: switch (ddc_line) { case RADEON_GPIO_DVI_DDC: - /* in theory this should be hw capable, - * but it doesn't seem to work - */ - i2c.hw_capable = false; + i2c.hw_capable = true; break; default: i2c.hw_capable = false; @@ -603,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde } i2c.mm_i2c = false; i2c.i2c_id = 0; - i2c.hpd_id = 0; + i2c.hpd = RADEON_HPD_NONE; if (ddc_line) i2c.valid 
= true; @@ -633,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) p1pll->reference_div = RBIOS16(pll_info + 0x10); p1pll->pll_out_min = RBIOS32(pll_info + 0x12); p1pll->pll_out_max = RBIOS32(pll_info + 0x16); + p1pll->lcd_pll_out_min = p1pll->pll_out_min; + p1pll->lcd_pll_out_max = p1pll->pll_out_max; if (rev > 9) { p1pll->pll_in_min = RBIOS32(pll_info + 0x36); @@ -761,7 +760,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct dac = RBIOS8(dac_info + 0x3) & 0xf; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } - found = 1; + /* if the values are all zeros, use the table */ + if (p_dac->ps2_pdac_adj) + found = 1; } if (!found) /* fallback to defaults */ @@ -896,7 +897,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct bg = RBIOS8(dac_info + 0x10) & 0xf; dac = RBIOS8(dac_info + 0x11) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } else if (rev > 1) { bg = RBIOS8(dac_info + 0xc) & 0xf; dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; @@ -909,7 +912,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct bg = RBIOS8(dac_info + 0xe) & 0xf; dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } tv_dac->tv_std = radeon_combios_get_tv_info(rdev); } @@ -926,7 +931,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } else { bg = RBIOS8(dac_info + 0x4) & 0xf; dac = RBIOS8(dac_info + 0x5) & 0xf; @@ -934,7 +941,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } } else { DRM_INFO("No TV DAC info found in BIOS\n"); @@ -1104,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder break; if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && - (RBIOS16(tmp + 2) == - lvds->native_mode.vdisplay)) { - lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8; - lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8; - lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) + - RBIOS16(tmp + 21)) * 8; - - lvds->native_mode.vtotal = RBIOS16(tmp + 24); - lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff; - lvds->native_mode.vsync_end = - ((RBIOS16(tmp + 28) & 0xf800) >> 11) + - (RBIOS16(tmp + 28) & 0x7ff); + (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { + lvds->native_mode.htotal = lvds->native_mode.hdisplay + + (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; + lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + + (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; + lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + + (RBIOS8(tmp + 23) * 8); + + lvds->native_mode.vtotal = lvds->native_mode.vdisplay + + (RBIOS16(tmp + 24) - RBIOS16(tmp + 26)); + lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + + ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26)); + lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + + ((RBIOS16(tmp + 
28) & 0xf800) >> 11); lvds->native_mode.clock = RBIOS16(tmp + 9) * 10; lvds->native_mode.flags = 0; @@ -1400,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; } else #endif /* CONFIG_PPC_PMAC */ +#ifdef CONFIG_PPC64 + if (ASIC_IS_RN50(rdev)) + rdev->mode_info.connector_table = CT_RN50_POWER; + else +#endif rdev->mode_info.connector_table = CT_GENERIC; } @@ -1842,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; + case CT_RN50_POWER: + DRM_INFO("Connector Table: %d (rn50-power)\n", + rdev->mode_info.connector_table); + /* VGA - primary dac */ + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; + radeon_add_legacy_encoder(dev, + radeon_get_encoder_id(dev, + ATOM_DEVICE_CRT1_SUPPORT, + 1), + ATOM_DEVICE_CRT1_SUPPORT); + radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_VGA, &ddc_i2c, + CONNECTOR_OBJECT_ID_VGA, + &hpd); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + hpd.hpd = RADEON_HPD_NONE; + radeon_add_legacy_encoder(dev, + radeon_get_encoder_id(dev, + ATOM_DEVICE_CRT2_SUPPORT, + 2), + ATOM_DEVICE_CRT2_SUPPORT); + radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, + DRM_MODE_CONNECTOR_VGA, &ddc_i2c, + CONNECTOR_OBJECT_ID_VGA, + &hpd); + break; default: DRM_INFO("Connector table: %d (invalid)\n", rdev->mode_info.connector_table); @@ -1895,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, return false; } - /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */ - if (dev->pdev->device == 0x5159 && - dev->pdev->subsystem_vendor == 0x1002 && - dev->pdev->subsystem_device == 0x013a) { - if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) - *legacy_connector = CONNECTOR_CRT_LEGACY; - - } - /* X300 card with extra non-existent DVI port */ if (dev->pdev->device == 0x5B60 && dev->pdev->subsystem_vendor == 0x17af && @@ -2015,6 +2049,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); break; default: + ddc_i2c.valid = false; break; } @@ -2187,7 +2222,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) ATOM_DEVICE_DFP1_SUPPORT); ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); - hpd.hpd = RADEON_HPD_NONE; + hpd.hpd = RADEON_HPD_1; radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT | @@ -2328,6 +2363,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) if (RBIOS8(tv_info + 6) == 'T') { if (radeon_apply_legacy_tv_quirks(dev)) { hpd.hpd = RADEON_HPD_NONE; + ddc_i2c.valid = false; radeon_add_legacy_encoder(dev, radeon_get_encoder_id (dev, @@ -2357,7 +2393,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) u8 rev, blocks, tmp; int state_index = 0; - rdev->pm.default_power_state = NULL; + rdev->pm.default_power_state_index = -1; if (rdev->flags & RADEON_IS_MOBILITY) { offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); @@ -2371,17 +2407,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) goto default_mode; - /* skip overclock modes for now */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk > - rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || - 
(rdev->pm.power_state[state_index].clock_info[0].sclk > - rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) - goto default_mode; rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY; misc = RBIOS16(offset + 0x5 + 0x0); if (rev > 4) misc2 = RBIOS16(offset + 0x5 + 0xe); + rdev->pm.power_state[state_index].misc = misc; + rdev->pm.power_state[state_index].misc2 = misc2; if (misc & 0x4) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; if (misc & 0x8) @@ -2428,8 +2460,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) } else rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; if (rev > 6) - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + rdev->pm.power_state[state_index].pcie_lanes = RBIOS8(offset + 0x5 + 0x10); + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; state_index++; } else { /* XXX figure out some good default low power mode for mobility cards w/out power tables */ @@ -2446,17 +2479,19 @@ default_mode: rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - if (rdev->asic->get_pcie_lanes) - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); + if ((state_index > 0) && + (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO)) + rdev->pm.power_state[state_index].clock_info[0].voltage = + rdev->pm.power_state[0].clock_info[0].voltage; else - rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; - rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + rdev->pm.power_state[state_index].pcie_lanes = 16; + rdev->pm.power_state[state_index].flags = 0; + rdev->pm.default_power_state_index = state_index; rdev->pm.num_power_states = state_index + 1; - rdev->pm.current_power_state = rdev->pm.default_power_state; - rdev->pm.current_clock_mode = - rdev->pm.default_power_state->default_clock_mode; + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.current_clock_mode_index = 0; } void radeon_external_tmds_setup(struct drm_encoder *encoder) @@ -3007,6 +3042,22 @@ void radeon_combios_asic_init(struct drm_device *dev) combios_write_ram_size(dev); } + /* quirk for rs4xx HP nx6125 laptop to make it resume + * - it hangs on resume inside the dynclk 1 table. + */ + if (rdev->family == CHIP_RS480 && + rdev->pdev->subsystem_vendor == 0x103c && + rdev->pdev->subsystem_device == 0x308b) + return; + + /* quirk for rs4xx HP dv5000 laptop to make it resume + * - it hangs on resume inside the dynclk 1 table. 
+ */ + if (rdev->family == CHIP_RS480 && + rdev->pdev->subsystem_vendor == 0x103c && + rdev->pdev->subsystem_device == 0x30a4) + return; + /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); if (table) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index ee0083f982d8..adccbc2c202c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, { struct drm_device *dev = connector->dev; struct drm_connector *conflict; + struct radeon_connector *radeon_conflict; int i; list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { if (conflict == connector) continue; + radeon_conflict = to_radeon_connector(conflict); for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (conflict->encoder_ids[i] == 0) break; @@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, if (conflict->status != connector_status_connected) continue; + if (radeon_conflict->use_digital) + continue; + if (priority == true) { DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); @@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr if (property == rdev->mode_info.coherent_mode_property) { struct radeon_encoder_atom_dig *dig; + bool new_coherent_mode; /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); @@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr return 0; dig = radeon_encoder->enc_priv; - dig->coherent_mode = val ? true : false; - radeon_property_change_mode(&radeon_encoder->base); + new_coherent_mode = val ? true : false; + if (dig->coherent_mode != new_coherent_mode) { + dig->coherent_mode = new_coherent_mode; + radeon_property_change_mode(&radeon_encoder->base); + } } if (property == rdev->mode_info.tv_std_property) { @@ -315,7 +324,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr radeon_encoder = to_radeon_encoder(encoder); if (!radeon_encoder->enc_priv) return 0; - if (rdev->is_atom_bios) { + if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) { struct radeon_encoder_atom_dac *dac_int; dac_int = radeon_encoder->enc_priv; dac_int->tv_std = val; @@ -762,30 +771,27 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect } else ret = connector_status_connected; - /* multiple connectors on the same encoder with the same ddc line - * This tends to be HDMI and DVI on the same encoder with the - * same ddc line. If the edid says HDMI, consider the HDMI port - * connected and the DVI port disconnected. If the edid doesn't - * say HDMI, vice versa. + /* This gets complicated. We have boards with VGA + HDMI with a + * shared DDC line and we have boards with DVI-D + HDMI with a shared + * DDC line. The latter is more complex because with DVI<->HDMI adapters + * you don't really know what's connected to which port as both are digital. 
*/ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct drm_connector *list_connector; struct radeon_connector *list_radeon_connector; list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { if (connector == list_connector) continue; list_radeon_connector = to_radeon_connector(list_connector); - if (radeon_connector->devices == list_radeon_connector->devices) { - if (drm_detect_hdmi_monitor(radeon_connector->edid)) { - if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; - ret = connector_status_disconnected; - } - } else { - if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) || - (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) { + if (list_radeon_connector->shared_ddc && + (list_radeon_connector->ddc_bus->rec.i2c_id == + radeon_connector->ddc_bus->rec.i2c_id)) { + /* cases where both connectors are digital */ + if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { + /* hpd is our only option in this case */ + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; ret = connector_status_disconnected; @@ -940,7 +946,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) if (radeon_connector->edid) kfree(radeon_connector->edid); if (radeon_dig_connector->dp_i2c_bus) - radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus); + radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); kfree(radeon_connector->con_priv); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); @@ -1032,7 +1038,6 @@ radeon_add_atom_connector(struct drm_device *dev, struct radeon_connector_atom_dig *radeon_dig_connector; uint32_t subpixel_order = SubPixelNone; bool shared_ddc = false; - int ret; /* fixme - tv/cv/din */ if (connector_type == DRM_MODE_CONNECTOR_Unknown) @@ -1067,9 +1072,7 @@ radeon_add_atom_connector(struct drm_device *dev, switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); if (!radeon_connector->ddc_bus) @@ -1079,12 +1082,11 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + connector->polled = DRM_CONNECTOR_POLL_CONNECT; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); if (!radeon_connector->ddc_bus) @@ -1104,9 +1106,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); - ret = 
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); if (!radeon_connector->ddc_bus) @@ -1132,9 +1132,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); if (!radeon_connector->ddc_bus) @@ -1154,9 +1152,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { /* add DP i2c bus */ if (connector_type == DRM_MODE_CONNECTOR_eDP) @@ -1182,9 +1178,7 @@ radeon_add_atom_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_9PinDIN: if (radeon_tv == 1) { drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, @@ -1202,9 +1196,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); if (!radeon_connector->ddc_bus) @@ -1217,6 +1209,12 @@ radeon_add_atom_connector(struct drm_device *dev, break; } + if (hpd->hpd == RADEON_HPD_NONE) { + if (i2c_bus->valid) + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + } else + connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->display_info.subpixel_order = subpixel_order; drm_sysfs_connector_add(connector); return; @@ -1241,7 +1239,6 @@ radeon_add_legacy_connector(struct drm_device *dev, struct drm_connector *connector; struct radeon_connector *radeon_connector; uint32_t subpixel_order = SubPixelNone; - int ret; /* fixme - tv/cv/din */ if (connector_type == DRM_MODE_CONNECTOR_Unknown) @@ -1269,9 +1266,7 @@ radeon_add_legacy_connector(struct drm_device *dev, switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); - ret = 
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); if (!radeon_connector->ddc_bus) @@ -1281,12 +1276,11 @@ radeon_add_legacy_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + connector->polled = DRM_CONNECTOR_POLL_CONNECT; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); if (!radeon_connector->ddc_bus) @@ -1300,13 +1294,13 @@ radeon_add_legacy_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); if (!radeon_connector->ddc_bus) goto failed; + } + if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, @@ -1319,9 +1313,7 @@ radeon_add_legacy_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_9PinDIN: if (radeon_tv == 1) { drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; /* RS400,RC410,RS480 chipset seems to report a lot * of false positive on load detect, we haven't yet @@ -1340,9 +1332,7 @@ radeon_add_legacy_connector(struct drm_device *dev, break; case DRM_MODE_CONNECTOR_LVDS: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); - if (ret) - goto failed; + drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); if (!radeon_connector->ddc_bus) @@ -1355,6 +1345,11 @@ radeon_add_legacy_connector(struct drm_device *dev, break; } + if (hpd->hpd == RADEON_HPD_NONE) { + if (i2c_bus->valid) + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + } else + connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; drm_sysfs_connector_add(connector); return; diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index dc6eba6b96dd..2f042a3c0e62 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) return -EBUSY; } 
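The connector hunks above consistently apply the new polling policy: connectors with a hotplug pin rely on HPD, and connectors without one fall back to connect-polling only when a DDC bus exists to probe. A minimal sketch of that decision (the constants are stand-ins for DRM_CONNECTOR_POLL_HPD / DRM_CONNECTOR_POLL_CONNECT, not the kernel's values):

#include <stdbool.h>
#include <stdio.h>

#define POLL_CONNECT 1	/* stand-in for DRM_CONNECTOR_POLL_CONNECT */
#define POLL_HPD     2	/* stand-in for DRM_CONNECTOR_POLL_HPD */

static int pick_polled(bool has_hpd_pin, bool has_ddc_bus)
{
	if (has_hpd_pin)
		return POLL_HPD;		/* hotplug interrupt available */
	return has_ddc_bus ? POLL_CONNECT : 0;	/* otherwise poll, if probing is possible */
}

int main(void)
{
	printf("no hpd, ddc -> %d\n", pick_polled(false, true));	/* 1 */
	printf("hpd         -> %d\n", pick_polled(true, true));		/* 2 */
	return 0;
}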
-static void radeon_init_pipes(drm_radeon_private_t *dev_priv) +static void radeon_init_pipes(struct drm_device *dev) { + drm_radeon_private_t *dev_priv = dev->dev_private; uint32_t gb_tile_config, gb_pipe_sel = 0; if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { @@ -434,13 +435,19 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv) if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; + /* SE cards have 1 pipe */ + if ((dev->pdev->device == 0x5e4c) || + (dev->pdev->device == 0x5e4f)) + dev_priv->num_gb_pipes = 1; } else { /* R3xx */ - if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && + dev->pdev->device != 0x4144) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 && + dev->pdev->device != 0x4148)) { dev_priv->num_gb_pipes = 2; } else { - /* R3Vxx */ + /* RV3xx/R300 AD/R350 AH */ dev_priv->num_gb_pipes = 1; } } @@ -736,7 +743,7 @@ static int radeon_do_engine_reset(struct drm_device * dev) /* setup the raster pipes */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) - radeon_init_pipes(dev_priv); + radeon_init_pipes(dev); /* Reset the CP ring */ radeon_do_cp_reset(dev_priv); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 70ba02ed7723..ae0fb7356e62 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -193,9 +193,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) radeon_bo_list_fence(&parser->validated, parser->ib->fence); } radeon_bo_list_unreserve(&parser->validated); - for (i = 0; i < parser->nrelocs; i++) { - if (parser->relocs[i].gobj) - drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); + if (parser->relocs != NULL) { + for (i = 0; i < parser->nrelocs; i++) { + if (parser->relocs[i].gobj) + drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); + } } kfree(parser->track); kfree(parser->relocs); @@ -218,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) int r; mutex_lock(&rdev->cs_mutex); - if (rdev->gpu_lockup) { - mutex_unlock(&rdev->cs_mutex); - return -EINVAL; - } /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; @@ -243,7 +241,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } r = radeon_cs_parser_relocs(&parser); if (r) { - DRM_ERROR("Failed to parse relocation !\n"); + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index b7023fff89eb..4eb67c0e0996 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -194,7 +194,7 @@ unpin: fail: drm_gem_object_unreference_unlocked(obj); - return 0; + return ret; } int radeon_crtc_cursor_move(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e28e4ed5f720..dd279da90546 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -26,6 +26,7 @@ * Jerome Glisse */ #include <linux/console.h> +#include <linux/slab.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> 
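The radeon_init_pipes() change above takes the struct drm_device precisely so it can key single-pipe variants off the PCI device ID rather than the family alone. An illustrative sketch (the IDs are the ones quoted in the hunk; the "SE" naming for 0x5e4c/0x5e4f is an assumption):

#include <stdint.h>
#include <stdio.h>

static int gb_pipes_for_device(uint16_t device, int pipes_from_gb_pipe_select)
{
	switch (device) {
	case 0x5e4c:	/* single-pipe "SE" part (assumed naming) */
	case 0x5e4f:	/* single-pipe "SE" part (assumed naming) */
	case 0x4144:	/* R300 AD */
	case 0x4148:	/* R350 AH */
		return 1;
	default:
		return pipes_from_gb_pipe_select;	/* trust GB_PIPE_SELECT */
	}
}

int main(void)
{
	printf("0x5e4c -> %d pipe(s)\n", gb_pipes_for_device(0x5e4c, 4));
	printf("0x4150 -> %d pipe(s)\n", gb_pipes_for_device(0x4150, 1));
	return 0;
}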
#include <drm/radeon_drm.h> @@ -33,9 +34,56 @@ #include <linux/vga_switcheroo.h> #include "radeon_reg.h" #include "radeon.h" -#include "radeon_asic.h" #include "atom.h" +static const char radeon_family_name[][16] = { + "R100", + "RV100", + "RS100", + "RV200", + "RS200", + "R200", + "RV250", + "RS300", + "RV280", + "R300", + "R350", + "RV350", + "RV380", + "R420", + "R423", + "RV410", + "RS400", + "RS480", + "RS600", + "RS690", + "RS740", + "RV515", + "R520", + "RV530", + "RV560", + "RV570", + "R580", + "R600", + "RV610", + "RV630", + "RV670", + "RV620", + "RV635", + "RS780", + "RS880", + "RV770", + "RV730", + "RV710", + "RV740", + "CEDAR", + "REDWOOD", + "JUNIPER", + "CYPRESS", + "HEMLOCK", + "LAST", +}; + /* * Clear GPU surface registers. */ @@ -178,20 +226,20 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) { u64 size_af, size_bf; - size_af = 0xFFFFFFFF - mc->vram_end; - size_bf = mc->vram_start; + size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; + size_bf = mc->vram_start & ~mc->gtt_base_align; if (size_bf > size_af) { if (mc->gtt_size > size_bf) { dev_warn(rdev->dev, "limiting GTT\n"); mc->gtt_size = size_bf; } - mc->gtt_start = mc->vram_start - mc->gtt_size; + mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; } else { if (mc->gtt_size > size_af) { dev_warn(rdev->dev, "limiting GTT\n"); mc->gtt_size = size_af; } - mc->gtt_start = mc->vram_end + 1; + mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; } mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", @@ -242,6 +290,36 @@ bool radeon_card_posted(struct radeon_device *rdev) } +void radeon_update_bandwidth_info(struct radeon_device *rdev) +{ + fixed20_12 a; + u32 sclk, mclk; + + if (rdev->flags & RADEON_IS_IGP) { + sclk = radeon_get_engine_clock(rdev); + mclk = rdev->clock.default_mclk; + + a.full = dfixed_const(100); + rdev->pm.sclk.full = dfixed_const(sclk); + rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); + rdev->pm.mclk.full = dfixed_const(mclk); + rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); + + a.full = dfixed_const(16); + /* core_bandwidth = sclk(Mhz) * 16 */ + rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); + } else { + sclk = radeon_get_engine_clock(rdev); + mclk = radeon_get_memory_clock(rdev); + + a.full = dfixed_const(100); + rdev->pm.sclk.full = dfixed_const(sclk); + rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); + rdev->pm.mclk.full = dfixed_const(mclk); + rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); + } +} + bool radeon_boot_test_post_card(struct radeon_device *rdev) { if (radeon_card_posted(rdev)) @@ -288,181 +366,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev) } -/* - * Registers accessors functions. 
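The radeon_gtt_location() hunk above reads as if mc->gtt_base_align holds (alignment - 1): adding it and masking with its complement rounds the GTT base up to the next aligned boundary. A worked sketch under that assumption, with invented sizes:

#include <stdint.h>
#include <stdio.h>

/* round addr up to the next boundary; align_mask is (alignment - 1) */
static uint64_t align_up(uint64_t addr, uint64_t align_mask)
{
	return (addr + align_mask) & ~align_mask;
}

int main(void)
{
	uint64_t vram_end = 0x0FFFFFFF;		/* 256 MB of VRAM */
	uint64_t mask = (1ULL << 20) - 1;	/* 1 MB alignment */

	/* mirrors: gtt_start = (vram_end + 1 + gtt_base_align) & ~gtt_base_align */
	printf("gtt_start = 0x%llx\n",
	       (unsigned long long)align_up(vram_end + 1, mask));
	return 0;
}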
- */ -uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) -{ - DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); - BUG_ON(1); - return 0; -} - -void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", - reg, v); - BUG_ON(1); -} - -void radeon_register_accessor_init(struct radeon_device *rdev) -{ - rdev->mc_rreg = &radeon_invalid_rreg; - rdev->mc_wreg = &radeon_invalid_wreg; - rdev->pll_rreg = &radeon_invalid_rreg; - rdev->pll_wreg = &radeon_invalid_wreg; - rdev->pciep_rreg = &radeon_invalid_rreg; - rdev->pciep_wreg = &radeon_invalid_wreg; - - /* Don't change order as we are overridding accessor. */ - if (rdev->family < CHIP_RV515) { - rdev->pcie_reg_mask = 0xff; - } else { - rdev->pcie_reg_mask = 0x7ff; - } - /* FIXME: not sure here */ - if (rdev->family <= CHIP_R580) { - rdev->pll_rreg = &r100_pll_rreg; - rdev->pll_wreg = &r100_pll_wreg; - } - if (rdev->family >= CHIP_R420) { - rdev->mc_rreg = &r420_mc_rreg; - rdev->mc_wreg = &r420_mc_wreg; - } - if (rdev->family >= CHIP_RV515) { - rdev->mc_rreg = &rv515_mc_rreg; - rdev->mc_wreg = &rv515_mc_wreg; - } - if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { - rdev->mc_rreg = &rs400_mc_rreg; - rdev->mc_wreg = &rs400_mc_wreg; - } - if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { - rdev->mc_rreg = &rs690_mc_rreg; - rdev->mc_wreg = &rs690_mc_wreg; - } - if (rdev->family == CHIP_RS600) { - rdev->mc_rreg = &rs600_mc_rreg; - rdev->mc_wreg = &rs600_mc_wreg; - } - if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { - rdev->pciep_rreg = &r600_pciep_rreg; - rdev->pciep_wreg = &r600_pciep_wreg; - } -} - - -/* - * ASIC - */ -int radeon_asic_init(struct radeon_device *rdev) -{ - radeon_register_accessor_init(rdev); - switch (rdev->family) { - case CHIP_R100: - case CHIP_RV100: - case CHIP_RS100: - case CHIP_RV200: - case CHIP_RS200: - rdev->asic = &r100_asic; - break; - case CHIP_R200: - case CHIP_RV250: - case CHIP_RS300: - case CHIP_RV280: - rdev->asic = &r200_asic; - break; - case CHIP_R300: - case CHIP_R350: - case CHIP_RV350: - case CHIP_RV380: - if (rdev->flags & RADEON_IS_PCIE) - rdev->asic = &r300_asic_pcie; - else - rdev->asic = &r300_asic; - break; - case CHIP_R420: - case CHIP_R423: - case CHIP_RV410: - rdev->asic = &r420_asic; - break; - case CHIP_RS400: - case CHIP_RS480: - rdev->asic = &rs400_asic; - break; - case CHIP_RS600: - rdev->asic = &rs600_asic; - break; - case CHIP_RS690: - case CHIP_RS740: - rdev->asic = &rs690_asic; - break; - case CHIP_RV515: - rdev->asic = &rv515_asic; - break; - case CHIP_R520: - case CHIP_RV530: - case CHIP_RV560: - case CHIP_RV570: - case CHIP_R580: - rdev->asic = &r520_asic; - break; - case CHIP_R600: - case CHIP_RV610: - case CHIP_RV630: - case CHIP_RV620: - case CHIP_RV635: - case CHIP_RV670: - case CHIP_RS780: - case CHIP_RS880: - rdev->asic = &r600_asic; - break; - case CHIP_RV770: - case CHIP_RV730: - case CHIP_RV710: - case CHIP_RV740: - rdev->asic = &rv770_asic; - break; - case CHIP_CEDAR: - case CHIP_REDWOOD: - case CHIP_JUNIPER: - case CHIP_CYPRESS: - case CHIP_HEMLOCK: - rdev->asic = &evergreen_asic; - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; - } - - if (rdev->flags & RADEON_IS_IGP) { - rdev->asic->get_memory_clock = NULL; - rdev->asic->set_memory_clock = NULL; - } - - return 0; -} - - -/* - * Wrapper around modesetting bits. 
- */ -int radeon_clocks_init(struct radeon_device *rdev) -{ - int r; - - r = radeon_static_clocks_init(rdev->ddev); - if (r) { - return r; - } - DRM_INFO("Clocks initialized !\n"); - return 0; -} - -void radeon_clocks_fini(struct radeon_device *rdev) -{ -} - /* ATOM accessor methods */ static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) { @@ -567,29 +470,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -void radeon_agp_disable(struct radeon_device *rdev) -{ - rdev->flags &= ~RADEON_IS_AGP; - if (rdev->family >= CHIP_R600) { - DRM_INFO("Forcing AGP to PCIE mode\n"); - rdev->flags |= RADEON_IS_PCIE; - } else if (rdev->family >= CHIP_RV515 || - rdev->family == CHIP_RV380 || - rdev->family == CHIP_RV410 || - rdev->family == CHIP_R423) { - DRM_INFO("Forcing AGP to PCIE mode\n"); - rdev->flags |= RADEON_IS_PCIE; - rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; - rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; - } else { - DRM_INFO("Forcing AGP to PCI mode\n"); - rdev->flags |= RADEON_IS_PCI; - rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; - rdev->asic->gart_set_page = &r100_pci_gart_set_page; - } - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; -} - void radeon_check_arguments(struct radeon_device *rdev) { /* vramlimit must be a power of two */ @@ -666,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ rdev->powered_down = false; radeon_resume_kms(dev); + drm_kms_helper_poll_enable(dev); } else { printk(KERN_INFO "radeon: switched off\n"); + drm_kms_helper_poll_disable(dev); radeon_suspend_kms(dev, pmm); /* don't suspend or resume card normally */ rdev->powered_down = true; @@ -694,7 +576,6 @@ int radeon_device_init(struct radeon_device *rdev, int r; int dma_bits; - DRM_INFO("radeon: Initializing kernel modesetting.\n"); rdev->shutdown = false; rdev->dev = &pdev->dev; rdev->ddev = ddev; @@ -706,6 +587,10 @@ int radeon_device_init(struct radeon_device *rdev, rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; rdev->gpu_lockup = false; rdev->accel_working = false; + + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", + radeon_family_name[rdev->family], pdev->vendor, pdev->device); + /* mutex initialization are all done here so we * can recall function without having locking issues */ mutex_init(&rdev->cs_mutex); @@ -716,9 +601,11 @@ int radeon_device_init(struct radeon_device *rdev, spin_lock_init(&rdev->ih.lock); mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); + mutex_init(&rdev->vram_mutex); rwlock_init(&rdev->fence_drv.lock); INIT_LIST_HEAD(&rdev->gem.objects); init_waitqueue_head(&rdev->irq.vblank_queue); + init_waitqueue_head(&rdev->irq.idle_queue); /* setup workqueue */ rdev->wq = create_workqueue("radeon"); @@ -731,6 +618,14 @@ int radeon_device_init(struct radeon_device *rdev, return r; radeon_check_arguments(rdev); + /* all of the newer IGP chips have an internal gart + * However some rs4xx report as AGP, so remove that here. 
+ */ + if ((rdev->family >= CHIP_RS400) && + (rdev->flags & RADEON_IS_IGP)) { + rdev->flags &= ~RADEON_IS_AGP; + } + if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { radeon_agp_disable(rdev); } @@ -780,7 +675,7 @@ int radeon_device_init(struct radeon_device *rdev, /* Acceleration not working on AGP card try again * with fallback to PCI or PCIE GART */ - radeon_gpu_reset(rdev); + radeon_asic_reset(rdev); radeon_fini(rdev); radeon_agp_disable(rdev); r = radeon_init(rdev); @@ -800,6 +695,8 @@ void radeon_device_fini(struct radeon_device *rdev) { DRM_INFO("radeon: finishing device.\n"); rdev->shutdown = true; + /* evict vram memory */ + radeon_bo_evict_vram(rdev); radeon_fini(rdev); destroy_workqueue(rdev->wq); vga_switcheroo_unregister_client(rdev->pdev); @@ -816,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) { struct radeon_device *rdev; struct drm_crtc *crtc; + struct drm_connector *connector; int r; if (dev == NULL || dev->dev_private == NULL) { @@ -828,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) if (rdev->powered_down) return 0; + + /* turn off display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } + /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); @@ -837,9 +741,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) continue; } robj = rfb->obj->driver_private; - if (robj != rdev->fbdev_rbo) { + /* don't unpin kernel fb objects */ + if (!radeon_fbdev_robj_is_fb(rdev, robj)) { r = radeon_bo_reserve(robj, false); - if (unlikely(r == 0)) { + if (r == 0) { radeon_bo_unpin(robj); radeon_bo_unreserve(robj); } @@ -852,11 +757,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) radeon_save_bios_scratch_regs(rdev); + radeon_pm_suspend(rdev); radeon_suspend(rdev); radeon_hpd_fini(rdev); /* evict remaining vram memory */ radeon_bo_evict_vram(rdev); + radeon_agp_suspend(rdev); + pci_save_state(dev->pdev); if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ @@ -864,13 +772,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) pci_set_power_state(dev->pdev, PCI_D3hot); } acquire_console_sem(); - fb_set_suspend(rdev->fbdev_info, 1); + radeon_fbdev_set_suspend(rdev, 1); release_console_sem(); return 0; } int radeon_resume_kms(struct drm_device *dev) { + struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; if (rdev->powered_down) @@ -887,8 +796,15 @@ int radeon_resume_kms(struct drm_device *dev) /* resume AGP if in use */ radeon_agp_resume(rdev); radeon_resume(rdev); + radeon_pm_resume(rdev); radeon_restore_bios_scratch_regs(rdev); - fb_set_suspend(rdev->fbdev_info, 0); + + /* turn on display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + + radeon_fbdev_set_suspend(rdev, 0); release_console_sem(); /* reset hpd state */ @@ -898,6 +814,26 @@ int radeon_resume_kms(struct drm_device *dev) return 0; } +int radeon_gpu_reset(struct radeon_device *rdev) +{ + int r; + + radeon_save_bios_scratch_regs(rdev); + radeon_suspend(rdev); + + r = radeon_asic_reset(rdev); + if (!r) { + dev_info(rdev->dev, "GPU reset succeed\n"); + radeon_resume(rdev); + radeon_restore_bios_scratch_regs(rdev); + drm_helper_resume_force_mode(rdev->ddev); + return 0; + } 
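The radeon_display.c hunks just below convert the PLL math from the rfixed_* helpers to the renamed dfixed_* 20.12 fixed-point helpers. As a standalone illustration of that arithmetic (these macros are simplified stand-ins, not the kernel's implementations, and the clock values are invented):

#include <stdint.h>
#include <stdio.h>

#define DFIX_SHIFT 12
#define dfixed_const(x)  ((uint32_t)((x) << DFIX_SHIFT))
#define dfixed_trunc(x)  ((x) >> DFIX_SHIFT)
#define dfixed_div(a, b) (uint32_t)(((uint64_t)(a) << DFIX_SHIFT) / (b))
#define dfixed_mul(a, b) (uint32_t)(((uint64_t)(a) * (b)) >> DFIX_SHIFT)

int main(void)
{
	/* feedback_divider = vco_freq / ref_freq * ref_div, as in calc_fb_div */
	uint32_t vco = dfixed_const(60000);	/* 600 MHz in 10 kHz units */
	uint32_t ref = dfixed_const(2700);	/* 27 MHz reference */
	uint32_t fb = dfixed_mul(dfixed_div(vco, ref), dfixed_const(2));

	printf("fb_div ~= %u\n", dfixed_trunc(fb));	/* 44 (44.4 truncated) */
	return 0;
}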
+ /* bad news, how to tell it to userspace ? */ + dev_info(rdev->dev, "GPU reset failed\n"); + return r; +} + /* * Debugfs diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ba8d806dcf39..8154cdf796e4 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc) WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); - WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id); - WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007); + WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); - WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0); + WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); for (i = 0; i < 256; i++) { - WREG32(EVERGREEN_DC_LUT_30_COLOR, + WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, (radeon_crtc->lut_r[i] << 20) | (radeon_crtc->lut_g[i] << 10) | (radeon_crtc->lut_b[i] << 0)); @@ -284,8 +284,7 @@ static const char *connector_names[15] = { "eDP", }; -static const char *hpd_names[7] = { - "NONE", +static const char *hpd_names[6] = { "HPD1", "HPD2", "HPD3", @@ -368,10 +367,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) if (rdev->bios) { if (rdev->is_atom_bios) { - if (rdev->family >= CHIP_R600) + ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); + if (ret == false) ret = radeon_get_atom_connector_info_from_object_table(dev); - else - ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); } else { ret = radeon_get_legacy_connector_info_from_bios(dev); if (ret == false) @@ -469,10 +467,19 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, uint32_t best_error = 0xffffffff; uint32_t best_vco_diff = 1; uint32_t post_div; + u32 pll_out_min, pll_out_max; DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); freq = freq * 1000; + if (pll->flags & RADEON_PLL_IS_LCD) { + pll_out_min = pll->lcd_pll_out_min; + pll_out_max = pll->lcd_pll_out_max; + } else { + pll_out_min = pll->pll_out_min; + pll_out_max = pll->pll_out_max; + } + if (pll->flags & RADEON_PLL_USE_REF_DIV) min_ref_div = max_ref_div = pll->reference_div; else { @@ -536,10 +543,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, tmp = (uint64_t)pll->reference_freq * feedback_div; vco = radeon_div(tmp, ref_div); - if (vco < pll->pll_out_min) { + if (vco < pll_out_min) { min_feed_div = feedback_div + 1; continue; - } else if (vco > pll->pll_out_max) { + } else if (vco > pll_out_max) { max_feed_div = feedback_div; continue; } @@ -625,37 +632,37 @@ calc_fb_div(struct radeon_pll *pll, vco_freq = freq * post_div; /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ - a.full = rfixed_const(pll->reference_freq); - feedback_divider.full = rfixed_const(vco_freq); - feedback_divider.full = rfixed_div(feedback_divider, a); - a.full = rfixed_const(ref_div); - feedback_divider.full = rfixed_mul(feedback_divider, a); + a.full = dfixed_const(pll->reference_freq); + feedback_divider.full = dfixed_const(vco_freq); + feedback_divider.full = dfixed_div(feedback_divider, a); + a.full = dfixed_const(ref_div); + feedback_divider.full = dfixed_mul(feedback_divider, a); if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { /* feedback_divider = floor((feedback_divider * 
10.0) + 0.5) * 0.1; */ - a.full = rfixed_const(10); - feedback_divider.full = rfixed_mul(feedback_divider, a); - feedback_divider.full += rfixed_const_half(0); - feedback_divider.full = rfixed_floor(feedback_divider); - feedback_divider.full = rfixed_div(feedback_divider, a); + a.full = dfixed_const(10); + feedback_divider.full = dfixed_mul(feedback_divider, a); + feedback_divider.full += dfixed_const_half(0); + feedback_divider.full = dfixed_floor(feedback_divider); + feedback_divider.full = dfixed_div(feedback_divider, a); /* *fb_div = floor(feedback_divider); */ - a.full = rfixed_floor(feedback_divider); - *fb_div = rfixed_trunc(a); + a.full = dfixed_floor(feedback_divider); + *fb_div = dfixed_trunc(a); /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ - a.full = rfixed_const(10); - b.full = rfixed_mul(feedback_divider, a); + a.full = dfixed_const(10); + b.full = dfixed_mul(feedback_divider, a); - feedback_divider.full = rfixed_floor(feedback_divider); - feedback_divider.full = rfixed_mul(feedback_divider, a); + feedback_divider.full = dfixed_floor(feedback_divider); + feedback_divider.full = dfixed_mul(feedback_divider, a); feedback_divider.full = b.full - feedback_divider.full; - *fb_div_frac = rfixed_trunc(feedback_divider); + *fb_div_frac = dfixed_trunc(feedback_divider); } else { /* *fb_div = floor(feedback_divider + 0.5); */ - feedback_divider.full += rfixed_const_half(0); - feedback_divider.full = rfixed_floor(feedback_divider); + feedback_divider.full += dfixed_const_half(0); + feedback_divider.full = dfixed_floor(feedback_divider); - *fb_div = rfixed_trunc(feedback_divider); + *fb_div = dfixed_trunc(feedback_divider); *fb_div_frac = 0; } @@ -675,24 +682,33 @@ calc_fb_ref_div(struct radeon_pll *pll, { fixed20_12 ffreq, max_error, error, pll_out, a; u32 vco; + u32 pll_out_min, pll_out_max; - ffreq.full = rfixed_const(freq); + if (pll->flags & RADEON_PLL_IS_LCD) { + pll_out_min = pll->lcd_pll_out_min; + pll_out_max = pll->lcd_pll_out_max; + } else { + pll_out_min = pll->pll_out_min; + pll_out_max = pll->pll_out_max; + } + + ffreq.full = dfixed_const(freq); /* max_error = ffreq * 0.0025; */ - a.full = rfixed_const(400); - max_error.full = rfixed_div(ffreq, a); + a.full = dfixed_const(400); + max_error.full = dfixed_div(ffreq, a); for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); vco = vco / ((*ref_div) * 10); - if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max)) + if ((vco < pll_out_min) || (vco > pll_out_max)) continue; /* pll_out = vco / post_div; */ - a.full = rfixed_const(post_div); - pll_out.full = rfixed_const(vco); - pll_out.full = rfixed_div(pll_out, a); + a.full = dfixed_const(post_div); + pll_out.full = dfixed_const(vco); + pll_out.full = dfixed_div(pll_out, a); if (pll_out.full >= ffreq.full) { error.full = pll_out.full - ffreq.full; @@ -714,6 +730,15 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, { u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; u32 best_freq = 0, vco_frequency; + u32 pll_out_min, pll_out_max; + + if (pll->flags & RADEON_PLL_IS_LCD) { + pll_out_min = pll->lcd_pll_out_min; + pll_out_max = pll->lcd_pll_out_max; + } else { + pll_out_min = pll->pll_out_min; + pll_out_max = pll->pll_out_max; + } /* freq = freq / 10; */ do_div(freq, 10); @@ -724,7 +749,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, goto done; vco_frequency = freq * 
post_div; - if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) + if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) goto done; if (pll->flags & RADEON_PLL_USE_REF_DIV) { @@ -749,7 +774,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll, continue; vco_frequency = freq * post_div; - if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) + if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) continue; if (pll->flags & RADEON_PLL_USE_REF_DIV) { ref_div = pll->reference_div; @@ -805,10 +830,6 @@ void radeon_compute_pll(struct radeon_pll *pll, static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); - struct drm_device *dev = fb->dev; - - if (fb->fbdev) - radeonfb_remove(dev, fb); if (radeon_fb->obj) drm_gem_object_unreference_unlocked(radeon_fb->obj); @@ -830,21 +851,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { .create_handle = radeon_user_framebuffer_create_handle, }; -struct drm_framebuffer * -radeon_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj) +void +radeon_framebuffer_init(struct drm_device *dev, + struct radeon_framebuffer *rfb, + struct drm_mode_fb_cmd *mode_cmd, + struct drm_gem_object *obj) { - struct radeon_framebuffer *radeon_fb; - - radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); - if (radeon_fb == NULL) { - return NULL; - } - drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); - drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); - radeon_fb->obj = obj; - return &radeon_fb->base; + rfb->obj = obj; + drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); + drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); } static struct drm_framebuffer * @@ -853,6 +868,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd) { struct drm_gem_object *obj; + struct radeon_framebuffer *radeon_fb; obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); if (obj == NULL) { @@ -860,12 +876,26 @@ radeon_user_framebuffer_create(struct drm_device *dev, "can't create framebuffer\n", mode_cmd->handle); return NULL; } - return radeon_framebuffer_create(dev, mode_cmd, obj); + + radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); + if (radeon_fb == NULL) { + return NULL; + } + + radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); + + return &radeon_fb->base; +} + +static void radeon_output_poll_changed(struct drm_device *dev) +{ + struct radeon_device *rdev = dev->dev_private; + radeon_fb_output_poll_changed(rdev); } static const struct drm_mode_config_funcs radeon_mode_funcs = { .fb_create = radeon_user_framebuffer_create, - .fb_changed = radeonfb_probe, + .output_poll_changed = radeon_output_poll_changed }; struct drm_prop_enum_list { @@ -945,6 +975,26 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) return 0; } +void radeon_update_display_priority(struct radeon_device *rdev) +{ + /* adjustment options for the display watermarks */ + if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) { + /* set display priority to high for r3xx, rv515 chips + * this avoids flickering due to underflow to the + * display controllers during heavy acceleration. + * Don't force high on rs4xx igp chips as it seems to + * affect the sound card. See kernel bug 15982. 
+ */ + if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && + !(rdev->flags & RADEON_IS_IGP)) + rdev->disp_priority = 2; + else + rdev->disp_priority = 0; + } else + rdev->disp_priority = radeon_disp_priority; + +} + int radeon_modeset_init(struct radeon_device *rdev) { int i; @@ -976,15 +1026,6 @@ int radeon_modeset_init(struct radeon_device *rdev) radeon_combios_check_hardcoded_edid(rdev); } - if (rdev->flags & RADEON_SINGLE_CRTC) - rdev->num_crtc = 1; - else { - if (ASIC_IS_DCE4(rdev)) - rdev->num_crtc = 6; - else - rdev->num_crtc = 2; - } - /* allocate crtcs */ for (i = 0; i < rdev->num_crtc; i++) { radeon_crtc_init(rdev->ddev, i); @@ -997,15 +1038,24 @@ int radeon_modeset_init(struct radeon_device *rdev) } /* initialize hpd */ radeon_hpd_init(rdev); - drm_helper_initial_config(rdev->ddev); + + /* Initialize power management */ + radeon_pm_init(rdev); + + radeon_fbdev_init(rdev); + drm_kms_helper_poll_init(rdev->ddev); + return 0; } void radeon_modeset_fini(struct radeon_device *rdev) { + radeon_fbdev_fini(rdev); kfree(rdev->mode_info.bios_hardcoded_edid); + radeon_pm_fini(rdev); if (rdev->mode_info.mode_config_initialized) { + drm_kms_helper_poll_fini(rdev->ddev); radeon_hpd_fini(rdev); drm_mode_config_cleanup(rdev->ddev); rdev->mode_info.mode_config_initialized = false; @@ -1055,15 +1105,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, } if (radeon_crtc->rmx_type != RMX_OFF) { fixed20_12 a, b; - a.full = rfixed_const(crtc->mode.vdisplay); - b.full = rfixed_const(radeon_crtc->native_mode.hdisplay); - radeon_crtc->vsc.full = rfixed_div(a, b); - a.full = rfixed_const(crtc->mode.hdisplay); - b.full = rfixed_const(radeon_crtc->native_mode.vdisplay); - radeon_crtc->hsc.full = rfixed_div(a, b); + a.full = dfixed_const(crtc->mode.vdisplay); + b.full = dfixed_const(radeon_crtc->native_mode.hdisplay); + radeon_crtc->vsc.full = dfixed_div(a, b); + a.full = dfixed_const(crtc->mode.hdisplay); + b.full = dfixed_const(radeon_crtc->native_mode.vdisplay); + radeon_crtc->hsc.full = dfixed_div(a, b); } else { - radeon_crtc->vsc.full = rfixed_const(1); - radeon_crtc->hsc.full = rfixed_const(1); + radeon_crtc->vsc.full = dfixed_const(1); + radeon_crtc->hsc.full = dfixed_const(1); } return true; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6eec0ece6a6c..e166fe4d7c30 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -42,9 +42,13 @@ * KMS wrapper. 
* - 2.0.0 - initial interface * - 2.1.0 - add square tiling interface + * - 2.2.0 - add r6xx/r7xx const buffer support + * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs + * - 2.4.0 - add crtc id query + * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 1 +#define KMS_DRIVER_MINOR 5 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -89,8 +93,9 @@ int radeon_testing = 0; int radeon_connector_table = 0; int radeon_tv = 1; int radeon_new_pll = -1; -int radeon_dynpm = -1; int radeon_audio = 1; +int radeon_disp_priority = 0; +int radeon_hw_i2c = 0; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -128,12 +133,15 @@ module_param_named(tv, radeon_tv, int, 0444); MODULE_PARM_DESC(new_pll, "Select new PLL code"); module_param_named(new_pll, radeon_new_pll, int, 0444); -MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)"); -module_param_named(dynpm, radeon_dynpm, int, 0444); - MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); module_param_named(audio, radeon_audio, int, 0444); +MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); +module_param_named(disp_priority, radeon_disp_priority, int, 0444); + +MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); +module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); + static int radeon_suspend(struct drm_device *dev, pm_message_t state) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -206,6 +214,7 @@ static struct drm_driver driver_old = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = radeon_compat_ioctl, #endif @@ -294,6 +303,7 @@ static struct drm_driver kms_driver = { .mmap = radeon_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = radeon_kms_compat_ioctl, #endif diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index ec55f2b23c22..448eba89d1e6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -107,9 +107,10 @@ * 1.30- Add support for occlusion queries * 1.31- Add support for num Z pipes from GET_PARAM * 1.32- fixes for rv740 setup + * 1.33- Add r6xx/r7xx const buffer support */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 32 +#define DRIVER_MINOR 33 #define DRIVER_PATCHLEVEL 0 enum radeon_cp_microcode_version { diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index bc926ea0a530..e0b30b264c28 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -254,6 +254,53 @@ radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder) return dig_connector; } +void radeon_panel_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_display_mode *native_mode = &radeon_encoder->native_mode; + unsigned hblank = native_mode->htotal - native_mode->hdisplay; + unsigned vblank = native_mode->vtotal - native_mode->vdisplay; + unsigned hover = native_mode->hsync_start - native_mode->hdisplay; + unsigned vover = 
native_mode->vsync_start - native_mode->vdisplay; + unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start; + unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start; + + adjusted_mode->clock = native_mode->clock; + adjusted_mode->flags = native_mode->flags; + + if (ASIC_IS_AVIVO(rdev)) { + adjusted_mode->hdisplay = native_mode->hdisplay; + adjusted_mode->vdisplay = native_mode->vdisplay; + } + + adjusted_mode->htotal = native_mode->hdisplay + hblank; + adjusted_mode->hsync_start = native_mode->hdisplay + hover; + adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width; + + adjusted_mode->vtotal = native_mode->vdisplay + vblank; + adjusted_mode->vsync_start = native_mode->vdisplay + vover; + adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width; + + drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); + + if (ASIC_IS_AVIVO(rdev)) { + adjusted_mode->crtc_hdisplay = native_mode->hdisplay; + adjusted_mode->crtc_vdisplay = native_mode->vdisplay; + } + + adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank; + adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover; + adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width; + + adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank; + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover; + adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width; + +} + static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -262,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; - /* adjust pm to upcoming mode change */ - radeon_pm_compute_clocks(rdev); - /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); @@ -275,18 +319,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; /* get the native mode for LVDS */ - if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { - struct drm_display_mode *native_mode = &radeon_encoder->native_mode; - int mode_id = adjusted_mode->base.id; - *adjusted_mode = *native_mode; - if (!ASIC_IS_AVIVO(rdev)) { - adjusted_mode->hdisplay = mode->hdisplay; - adjusted_mode->vdisplay = mode->vdisplay; - adjusted_mode->crtc_hdisplay = mode->hdisplay; - adjusted_mode->crtc_vdisplay = mode->vdisplay; - } - adjusted_mode->base.id = mode_id; - } + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) + radeon_panel_mode_fixup(encoder, adjusted_mode); /* get the native mode for TV */ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { @@ -302,7 +336,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, } if (ASIC_IS_DCE3(rdev) && - (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { + (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); radeon_dp_set_link_config(connector, mode); } @@ -317,12 +351,8 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DAC_ENCODER_CONTROL_PS_ALLOCATION args; - int index = 0, num = 0; + int index = 0; 
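
The blanking arithmetic in radeon_panel_mode_fixup() above is easiest to see with concrete numbers; a worked example on a hypothetical 1680x1050 panel (timing values invented for illustration):

        /* native: hdisplay 1680, hsync_start 1784, hsync_end 1960, htotal 2240 */
        unsigned hblank = 2240 - 1680;          /* 560: total horizontal blanking */
        unsigned hover = 1784 - 1680;           /* 104: front porch (overplus) */
        unsigned hsync_width = 1960 - 1784;     /* 176: sync pulse width */
        /* whatever mode userspace asked for, it is handed the panel's timing back:
         * htotal = hdisplay + 560, hsync_start = hdisplay + 104,
         * hsync_end = hsync_start + 176, with clock and flags from the native mode */
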
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; - enum radeon_tv_std tv_std = TV_STD_NTSC; - - if (dac_info->tv_std) - tv_std = dac_info->tv_std; memset(&args, 0, sizeof(args)); @@ -330,12 +360,10 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); - num = 1; break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); - num = 2; break; } @@ -346,7 +374,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.ucDacStandard = ATOM_DAC1_CV; else { - switch (tv_std) { + switch (dac_info->tv_std) { case TV_STD_PAL: case TV_STD_PAL_M: case TV_STD_SCART_PAL: @@ -377,10 +405,6 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) TV_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0; struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; - enum radeon_tv_std tv_std = TV_STD_NTSC; - - if (dac_info->tv_std) - tv_std = dac_info->tv_std; memset(&args, 0, sizeof(args)); @@ -391,7 +415,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.sTVEncoder.ucTvStandard = ATOM_TV_CV; else { - switch (tv_std) { + switch (dac_info->tv_std) { case TV_STD_NTSC: args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; break; @@ -519,7 +543,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) break; } - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; switch (frev) { case 1: @@ -593,7 +618,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - r600_hdmi_enable(encoder, hdmi_detected); } int @@ -708,7 +732,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) struct radeon_connector_atom_dig *dig_connector = radeon_get_atom_connector_priv_from_encoder(encoder); union dig_encoder_control args; - int index = 0, num = 0; + int index = 0; uint8_t frev, crev; if (!dig || !dig_connector) @@ -724,9 +748,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) else index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); } - num = dig->dig_encoder + 1; - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; args.v1.ucAction = action; args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); @@ -785,7 +809,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t struct drm_connector *connector; struct radeon_connector *radeon_connector; union dig_transmitter_control args; - int index = 0, num = 0; + int index = 0; uint8_t frev, crev; bool is_dp = false; int pll_id = 0; @@ -814,7 +838,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } } - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; args.v1.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { @@ -860,15 +885,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int 
action, uint8_t switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v3.acConfig.ucTransmitterSel = 0; - num = 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: args.v3.acConfig.ucTransmitterSel = 1; - num = 1; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: args.v3.acConfig.ucTransmitterSel = 2; - num = 2; break; } @@ -877,25 +899,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v3.acConfig.fCoherentMode = 1; + if (radeon_encoder->pixel_clock > 165000) + args.v3.acConfig.fDualLinkConnector = 1; } } else if (ASIC_IS_DCE32(rdev)) { - if (dig->dig_encoder == 1) - args.v2.acConfig.ucEncoderSel = 1; + args.v2.acConfig.ucEncoderSel = dig->dig_encoder; if (dig_connector->linkb) args.v2.acConfig.ucLinkSel = 1; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v2.acConfig.ucTransmitterSel = 0; - num = 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: args.v2.acConfig.ucTransmitterSel = 1; - num = 1; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: args.v2.acConfig.ucTransmitterSel = 2; - num = 2; break; } @@ -904,6 +924,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v2.acConfig.fCoherentMode = 1; + if (radeon_encoder->pixel_clock > 165000) + args.v2.acConfig.fDualLinkConnector = 1; } } else { args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; @@ -913,31 +935,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (rdev->flags & RADEON_IS_IGP) { - if (radeon_encoder->pixel_clock > 165000) { - if (dig_connector->igp_lane_info & 0x3) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; - else if (dig_connector->igp_lane_info & 0xc) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; - } else { - if (dig_connector->igp_lane_info & 0x1) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else if (dig_connector->igp_lane_info & 0x2) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; - else if (dig_connector->igp_lane_info & 0x4) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; - else if (dig_connector->igp_lane_info & 0x8) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; - } + if ((rdev->flags & RADEON_IS_IGP) && + (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { + if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { + if (dig_connector->igp_lane_info & 0x1) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; + else if (dig_connector->igp_lane_info & 0x2) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; + else if (dig_connector->igp_lane_info & 0x4) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; + else if (dig_connector->igp_lane_info & 0x8) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; + } else { + if (dig_connector->igp_lane_info & 0x3) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; + else if (dig_connector->igp_lane_info & 0xc) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; } - break; } - if (radeon_encoder->pixel_clock > 165000) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; - if (dig_connector->linkb) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; else @@ -948,6 +964,8 @@ 
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; + if (radeon_encoder->pixel_clock > 165000) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; } } @@ -1054,16 +1072,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) if (is_dig) { switch (mode) { case DRM_MODE_DPMS_ON: - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); - { + if (!ASIC_IS_DCE4(rdev)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + dp_link_train(encoder, connector); + if (ASIC_IS_DCE4(rdev)) + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + if (!ASIC_IS_DCE4(rdev)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { + if (ASIC_IS_DCE4(rdev)) + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); + } break; } } else { @@ -1081,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) } radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - /* adjust pm to dpms change */ - radeon_pm_compute_clocks(rdev); } union crtc_source_param { @@ -1104,7 +1129,8 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) memset(&args, 0, sizeof(args)); - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; switch (frev) { case 1: @@ -1216,6 +1242,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + /* update scratch regs with new routing */ + radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static void @@ -1326,20 +1355,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - if (radeon_encoder->active_device & - (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (dig) - dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); - } radeon_encoder->pixel_clock = adjusted_mode->clock; - radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); - atombios_set_encoder_crtc_source(encoder); - - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); else @@ -1390,15 +1409,20 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: atombios_dac_setup(encoder, ATOM_ENABLE); - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) - 
atombios_tv_setup(encoder, ATOM_ENABLE); + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) + atombios_tv_setup(encoder, ATOM_ENABLE); + else + atombios_tv_setup(encoder, ATOM_DISABLE); + } break; } atombios_apply_encoder_quirks(encoder, adjusted_mode); - /* XXX */ - if (!ASIC_IS_DCE4(rdev)) + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { + r600_hdmi_enable(encoder); r600_hdmi_setmode(encoder, adjusted_mode); + } } static bool @@ -1418,7 +1442,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn memset(&args, 0, sizeof(args)); - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return false; args.sDacload.ucMisc = 0; @@ -1492,8 +1517,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + if (dig) + dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); + } + radeon_atom_output_lock(encoder, true); radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + /* this is needed for the pll/ss setup to work correctly in some cases */ + atombios_set_encoder_crtc_source(encoder); } static void radeon_atom_encoder_commit(struct drm_encoder *encoder) @@ -1504,11 +1541,52 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder) static void radeon_atom_encoder_disable(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + if (ASIC_IS_DCE4(rdev)) + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + else { + /* disable the encoder and transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE); + } + break; + case ENCODER_OBJECT_ID_INTERNAL_DDI: + atombios_ddia_setup(encoder, ATOM_DISABLE); + break; + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + atombios_external_tmds_setup(encoder, ATOM_DISABLE); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + atombios_dac_setup(encoder, ATOM_DISABLE); + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) + atombios_tv_setup(encoder, ATOM_DISABLE); + break; + } + if (radeon_encoder_is_digital(encoder)) { + if 
(atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + r600_hdmi_disable(encoder); dig = radeon_encoder->enc_priv; dig->dig_encoder = -1; } @@ -1549,12 +1627,14 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = { struct radeon_encoder_atom_dac * radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) { + struct drm_device *dev = radeon_encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); if (!dac) return NULL; - dac->tv_std = TV_STD_NTSC; + dac->tv_std = radeon_atombios_get_tv_info(rdev); return dac; } @@ -1632,6 +1712,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: @@ -1659,6 +1740,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; } - - r600_hdmi_init(encoder); } diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 93c7d5d41914..e329066dcabd 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -36,7 +36,7 @@ * Radeon chip families */ enum radeon_family { - CHIP_R100, + CHIP_R100 = 0, CHIP_RV100, CHIP_RS100, CHIP_RV200, @@ -99,4 +99,5 @@ enum radeon_chip_flags { RADEON_IS_PCI = 0x00800000UL, RADEON_IS_IGPGART = 0x01000000UL, }; + #endif diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 8fccbf29235e..dc1634bb0c11 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -23,11 +23,8 @@ * Authors: * David Airlie */ - /* - * Modularization - */ - #include <linux/module.h> +#include <linux/slab.h> #include <linux/fb.h> #include "drmP.h" @@ -41,17 +38,21 @@ #include <linux/vga_switcheroo.h> -struct radeon_fb_device { +/* object hierarchy - + this contains a helper + a radeon fb + the helper contains a pointer to radeon framebuffer baseclass. +*/ +struct radeon_fbdev { struct drm_fb_helper helper; - struct radeon_framebuffer *rfb; - struct radeon_device *rdev; + struct radeon_framebuffer rfb; + struct list_head fbdev_list; + struct radeon_device *rdev; }; static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, @@ -60,45 +61,6 @@ static struct fb_ops radeonfb_ops = { .fb_setcmap = drm_fb_helper_setcmap, }; -/** - * Currently it is assumed that the old framebuffer is reused. - * - * LOCKING - * caller should hold the mode config lock. 
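
One subtlety in the radeon_fbdev layout above: radeon_fb_find_or_create_single() later in this file recovers the radeon_fbdev from its drm_fb_helper argument with a plain cast, which is only valid while 'helper' remains the first member. A container_of() form would not depend on member order (hypothetical helper, not part of the patch):

        #include <linux/kernel.h>

        static inline struct radeon_fbdev *to_radeon_fbdev(struct drm_fb_helper *helper)
        {
                return container_of(helper, struct radeon_fbdev, helper);
        }
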
- * - */ -int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc) -{ - struct fb_info *info; - struct drm_framebuffer *fb; - struct drm_display_mode *mode = crtc->desired_mode; - - fb = crtc->fb; - if (fb == NULL) { - return 1; - } - info = fb->fbdev; - if (info == NULL) { - return 1; - } - if (mode == NULL) { - return 1; - } - info->var.xres = mode->hdisplay; - info->var.right_margin = mode->hsync_start - mode->hdisplay; - info->var.hsync_len = mode->hsync_end - mode->hsync_start; - info->var.left_margin = mode->htotal - mode->hsync_end; - info->var.yres = mode->vdisplay; - info->var.lower_margin = mode->vsync_start - mode->vdisplay; - info->var.vsync_len = mode->vsync_end - mode->vsync_start; - info->var.upper_margin = mode->vtotal - mode->vsync_end; - info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; - /* avoid overflow */ - info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; - - return 0; -} -EXPORT_SYMBOL(radeonfb_resize); static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { @@ -124,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo return aligned; } -static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { - .gamma_set = radeon_crtc_fb_gamma_set, - .gamma_get = radeon_crtc_fb_gamma_get, -}; +static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) +{ + struct radeon_bo *rbo = gobj->driver_private; + int ret; + + ret = radeon_bo_reserve(rbo, false); + if (likely(ret == 0)) { + radeon_bo_kunmap(rbo); + radeon_bo_unreserve(rbo); + } + drm_gem_object_unreference_unlocked(gobj); +} -int radeonfb_create(struct drm_device *dev, - uint32_t fb_width, uint32_t fb_height, - uint32_t surface_width, uint32_t surface_height, - uint32_t surface_depth, uint32_t surface_bpp, - struct drm_framebuffer **fb_p) +static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, + struct drm_mode_fb_cmd *mode_cmd, + struct drm_gem_object **gobj_p) { - struct radeon_device *rdev = dev->dev_private; - struct fb_info *info; - struct radeon_fb_device *rfbdev; - struct drm_framebuffer *fb = NULL; - struct radeon_framebuffer *rfb; - struct drm_mode_fb_cmd mode_cmd; + struct radeon_device *rdev = rfbdev->rdev; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; - struct device *device = &rdev->pdev->dev; - int size, aligned_size, ret; - u64 fb_gpuaddr; - void *fbptr = NULL; - unsigned long tmp; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; + int ret; + int aligned_size, size; - mode_cmd.width = surface_width; - mode_cmd.height = surface_height; - - /* avivo can't scanout real 24bpp */ - if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) - surface_bpp = 32; - - mode_cmd.bpp = surface_bpp; /* need to align pitch with crtc limits */ - mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); - mode_cmd.depth = surface_depth; + mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); - size = mode_cmd.pitch * mode_cmd.height; + size = mode_cmd->pitch * mode_cmd->height; aligned_size = ALIGN(size, PAGE_SIZE); - ret = radeon_gem_object_create(rdev, aligned_size, 0, - RADEON_GEM_DOMAIN_VRAM, - false, ttm_bo_type_kernel, - &gobj); + RADEON_GEM_DOMAIN_VRAM, + false, ttm_bo_type_kernel, + &gobj); if (ret) { - printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", - surface_width, surface_height); - ret = -ENOMEM; - goto out; + 
printk(KERN_ERR "failed to allocate framebuffer (%d)\n", + aligned_size); + return -ENOMEM; } rbo = gobj->driver_private; @@ -182,7 +131,7 @@ int radeonfb_create(struct drm_device *dev, tiling_flags = RADEON_TILING_MACRO; #ifdef __BIG_ENDIAN - switch (mode_cmd.bpp) { + switch (mode_cmd->bpp) { case 32: tiling_flags |= RADEON_TILING_SWAP_32BIT; break; @@ -195,57 +144,81 @@ int radeonfb_create(struct drm_device *dev, if (tiling_flags) { ret = radeon_bo_set_tiling_flags(rbo, - tiling_flags | RADEON_TILING_SURFACE, - mode_cmd.pitch); + tiling_flags | RADEON_TILING_SURFACE, + mode_cmd->pitch); if (ret) dev_err(rdev->dev, "FB failed to set tiling flags\n"); } - mutex_lock(&rdev->ddev->struct_mutex); - fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); - if (fb == NULL) { - DRM_ERROR("failed to allocate fb.\n"); - ret = -ENOMEM; - goto out_unref; - } + + ret = radeon_bo_reserve(rbo, false); if (unlikely(ret != 0)) goto out_unref; - ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); + ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL); if (ret) { radeon_bo_unreserve(rbo); goto out_unref; } if (fb_tiled) radeon_bo_check_tiling(rbo, 0, 0); - ret = radeon_bo_kmap(rbo, &fbptr); + ret = radeon_bo_kmap(rbo, NULL); radeon_bo_unreserve(rbo); if (ret) { goto out_unref; } - list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); + *gobj_p = gobj; + return 0; +out_unref: + radeonfb_destroy_pinned_object(gobj); + *gobj_p = NULL; + return ret; +} - *fb_p = fb; - rfb = to_radeon_framebuffer(fb); - rdev->fbdev_rfb = rfb; - rdev->fbdev_rbo = rbo; +static int radeonfb_create(struct radeon_fbdev *rfbdev, + struct drm_fb_helper_surface_size *sizes) +{ + struct radeon_device *rdev = rfbdev->rdev; + struct fb_info *info; + struct drm_framebuffer *fb = NULL; + struct drm_mode_fb_cmd mode_cmd; + struct drm_gem_object *gobj = NULL; + struct radeon_bo *rbo = NULL; + struct device *device = &rdev->pdev->dev; + int ret; + unsigned long tmp; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + + /* avivo can't scanout real 24bpp */ + if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) + sizes->surface_bpp = 32; - info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); + mode_cmd.bpp = sizes->surface_bpp; + mode_cmd.depth = sizes->surface_depth; + + ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); + rbo = gobj->driver_private; + + /* okay we have an object now allocate the framebuffer */ + info = framebuffer_alloc(0, device); if (info == NULL) { ret = -ENOMEM; goto out_unref; } - rdev->fbdev_info = info; - rfbdev = info->par; - rfbdev->helper.funcs = &radeon_fb_helper_funcs; - rfbdev->helper.dev = dev; - ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc, - RADEONFB_CONN_LIMIT); - if (ret) - goto out_unref; + info->par = rfbdev; + + radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); + + fb = &rfbdev->rfb.base; + + /* setup helper */ + rfbdev->helper.fb = fb; + rfbdev->helper.fbdev = info; - memset_io(fbptr, 0x0, aligned_size); + memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); strcpy(info->fix.id, "radeondrmfb"); @@ -254,17 +227,22 @@ int radeonfb_create(struct drm_device *dev, info->flags = FBINFO_DEFAULT; info->fbops = &radeonfb_ops; - tmp = fb_gpuaddr - rdev->mc.vram_start; + tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; info->fix.smem_start = rdev->mc.aper_base + tmp; - info->fix.smem_len = size; - info->screen_base = fbptr; - info->screen_size = size; + info->fix.smem_len = 
radeon_bo_size(rbo); + info->screen_base = rbo->kptr; + info->screen_size = radeon_bo_size(rbo); - drm_fb_helper_fill_var(info, fb, fb_width, fb_height); + drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ - info->aperture_base = rdev->ddev->mode_config.fb_base; - info->aperture_size = rdev->mc.real_vram_size; + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto out_unref; + } + info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; + info->apertures->ranges[0].size = rdev->mc.real_vram_size; info->fix.mmio_start = 0; info->fix.mmio_len = 0; @@ -273,44 +251,55 @@ int radeonfb_create(struct drm_device *dev, info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; + if (info->screen_base == NULL) { ret = -ENOSPC; goto out_unref; } + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto out_unref; + } + DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); - DRM_INFO("size %lu\n", (unsigned long)size); + DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitch); - fb->fbdev = info; - rfbdev->rfb = rfb; - rfbdev->rdev = rdev; - - mutex_unlock(&rdev->ddev->struct_mutex); vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); return 0; out_unref: if (rbo) { - ret = radeon_bo_reserve(rbo, false); - if (likely(ret == 0)) { - radeon_bo_kunmap(rbo); - radeon_bo_unreserve(rbo); - } + } if (fb && ret) { - list_del(&fb->filp_head); drm_gem_object_unreference(gobj); drm_framebuffer_cleanup(fb); kfree(fb); } - drm_gem_object_unreference(gobj); - mutex_unlock(&rdev->ddev->struct_mutex); -out: return ret; } +static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; + int new_fb = 0; + int ret; + + if (!helper->fb) { + ret = radeonfb_create(rfbdev, sizes); + if (ret) + return ret; + new_fb = 1; + } + return new_fb; +} + static char *mode_option; int radeon_parse_options(char *options) { @@ -327,46 +316,108 @@ int radeon_parse_options(char *options) return 0; } -int radeonfb_probe(struct drm_device *dev) +void radeon_fb_output_poll_changed(struct radeon_device *rdev) { - struct radeon_device *rdev = dev->dev_private; - int bpp_sel = 32; - - /* select 8 bpp console on RN50 or 16MB cards */ - if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) - bpp_sel = 8; - - return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create); + drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); } -int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) +static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) { struct fb_info *info; - struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); + struct radeon_framebuffer *rfb = &rfbdev->rfb; struct radeon_bo *rbo; int r; - if (!fb) { - return -EINVAL; + if (rfbdev->helper.fbdev) { + info = rfbdev->helper.fbdev; + + unregister_framebuffer(info); + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); } - info = fb->fbdev; - if (info) { - struct radeon_fb_device *rfbdev = info->par; + + if (rfb->obj) { rbo = rfb->obj->driver_private; - unregister_framebuffer(info); r = radeon_bo_reserve(rbo, false); if 
(likely(r == 0)) { radeon_bo_kunmap(rbo); radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } - drm_fb_helper_free(&rfbdev->helper); - framebuffer_release(info); + drm_gem_object_unreference_unlocked(rfb->obj); } + drm_fb_helper_fini(&rfbdev->helper); + drm_framebuffer_cleanup(&rfb->base); - printk(KERN_INFO "unregistered panic notifier\n"); + return 0; +} +static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { + .gamma_set = radeon_crtc_fb_gamma_set, + .gamma_get = radeon_crtc_fb_gamma_get, + .fb_probe = radeon_fb_find_or_create_single, +}; + +int radeon_fbdev_init(struct radeon_device *rdev) +{ + struct radeon_fbdev *rfbdev; + int bpp_sel = 32; + int ret; + + /* select 8 bpp console on RN50 or 16MB cards */ + if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) + bpp_sel = 8; + + rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL); + if (!rfbdev) + return -ENOMEM; + + rfbdev->rdev = rdev; + rdev->mode_info.rfbdev = rfbdev; + rfbdev->helper.funcs = &radeon_fb_helper_funcs; + + ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, + rdev->num_crtc, + RADEONFB_CONN_LIMIT); + if (ret) { + kfree(rfbdev); + return ret; + } + + drm_fb_helper_single_add_all_connectors(&rfbdev->helper); + drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); return 0; } -EXPORT_SYMBOL(radeonfb_remove); -MODULE_LICENSE("GPL"); + +void radeon_fbdev_fini(struct radeon_device *rdev) +{ + if (!rdev->mode_info.rfbdev) + return; + + radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); + kfree(rdev->mode_info.rfbdev); + rdev->mode_info.rfbdev = NULL; +} + +void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) +{ + fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); +} + +int radeon_fbdev_total_size(struct radeon_device *rdev) +{ + struct radeon_bo *robj; + int size = 0; + + robj = rdev->mode_info.rfbdev->rfb.obj->driver_private; + size += radeon_bo_size(robj); + return size; +} + +bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) +{ + if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private) + return true; + return false; +} diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 8495d4e32e18..b1f9a81b5d1d 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -33,6 +33,7 @@ #include <linux/wait.h> #include <linux/list.h> #include <linux/kref.h> +#include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "radeon_reg.h" @@ -57,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) radeon_fence_ring_emit(rdev, fence); fence->emited = true; - fence->timeout = jiffies + ((2000 * HZ) / 1000); list_del(&fence->list); list_add_tail(&fence->list, &rdev->fence_drv.emited); write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); @@ -70,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) struct list_head *i, *n; uint32_t seq; bool wake = false; + unsigned long cjiffies; - if (rdev == NULL) { - return true; - } - if (rdev->shutdown) { - return true; - } seq = RREG32(rdev->fence_drv.scratch_reg); - rdev->fence_drv.last_seq = seq; + if (seq != rdev->fence_drv.last_seq) { + rdev->fence_drv.last_seq = seq; + rdev->fence_drv.last_jiffies = jiffies; + rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT; + } else { + cjiffies = jiffies; + if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) { + cjiffies -= rdev->fence_drv.last_jiffies; + if (time_after(rdev->fence_drv.last_timeout, 
cjiffies)) { + /* update the timeout */ + rdev->fence_drv.last_timeout -= cjiffies; + } else { + /* the 500ms timeout has elapsed, we should test + * for GPU lockup + */ + rdev->fence_drv.last_timeout = 1; + } + } else { + /* wrap around; update last jiffies, we will just wait + * a little longer + */ + rdev->fence_drv.last_jiffies = cjiffies; + } + return false; + } n = NULL; list_for_each(i, &rdev->fence_drv.emited) { fence = list_entry(i, struct radeon_fence, list); @@ -170,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence) int radeon_fence_wait(struct radeon_fence *fence, bool intr) { struct radeon_device *rdev; - unsigned long cur_jiffies; - unsigned long timeout; - bool expired = false; + unsigned long irq_flags, timeout; + u32 seq; int r; if (fence == NULL) { @@ -183,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) if (radeon_fence_signaled(fence)) { return 0; } - + timeout = rdev->fence_drv.last_timeout; retry: - cur_jiffies = jiffies; - timeout = HZ / 100; - if (time_after(fence->timeout, cur_jiffies)) { - timeout = fence->timeout - cur_jiffies; - } - + /* save current sequence used to check for GPU lockup */ + seq = rdev->fence_drv.last_seq; if (intr) { radeon_irq_kms_sw_irq_get(rdev); r = wait_event_interruptible_timeout(rdev->fence_drv.queue, radeon_fence_signaled(fence), timeout); radeon_irq_kms_sw_irq_put(rdev); - if (unlikely(r < 0)) + if (unlikely(r < 0)) { return r; + } } else { radeon_irq_kms_sw_irq_get(rdev); r = wait_event_timeout(rdev->fence_drv.queue, @@ -205,38 +220,36 @@ retry: radeon_irq_kms_sw_irq_put(rdev); } if (unlikely(!radeon_fence_signaled(fence))) { - if (unlikely(r == 0)) { - expired = true; + /* we were interrupted for some reason and fence isn't + * signaled yet, resume wait + */ + if (r) { + timeout = r; + goto retry; } - if (unlikely(expired)) { - timeout = 1; - if (time_after(cur_jiffies, fence->timeout)) { - timeout = cur_jiffies - fence->timeout; - } - timeout = jiffies_to_msecs(timeout); - if (timeout > 500) { - DRM_ERROR("fence(%p:0x%08X) %lums timeout " - "going to reset GPU\n", - fence, fence->seq, timeout); - radeon_gpu_reset(rdev); - WREG32(rdev->fence_drv.scratch_reg, fence->seq); - } + /* don't protect read access to rdev->fence_drv.last_seq + * if we're experiencing a lockup the value doesn't change + */ + if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { + /* good news, we believe it's a lockup */ + WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); + /* FIXME: what should we do? marking everyone + * as signaled for now + */ + rdev->gpu_lockup = true; + r = radeon_gpu_reset(rdev); + if (r) + return r; + WREG32(rdev->fence_drv.scratch_reg, fence->seq); + rdev->gpu_lockup = false; } + timeout = RADEON_FENCE_JIFFIES_TIMEOUT; + write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); + rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT; + rdev->fence_drv.last_jiffies = jiffies; + write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); goto retry; } - if (unlikely(expired)) { - rdev->fence_drv.count_timeout++; - cur_jiffies = jiffies; - timeout = 1; - if (time_after(cur_jiffies, fence->timeout)) { - timeout = cur_jiffies - fence->timeout; - } - timeout = jiffies_to_msecs(timeout); - DRM_ERROR("fence(%p:0x%08X) %lums timeout\n", - fence, fence->seq, timeout); - DRM_ERROR("last signaled fence(0x%08X)\n", - rdev->fence_drv.last_seq); - } return 0; }
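
Condensed, the new wait policy is: sleep in fixed RADEON_FENCE_JIFFIES_TIMEOUT (~500ms) slices, and treat an expired slice as a potential lockup only if the fence sequence number made no progress at all. A simplified sketch of the loop (locking and error paths elided):

        u32 seq = rdev->fence_drv.last_seq;     /* snapshot before sleeping */
        r = wait_event_timeout(rdev->fence_drv.queue,
                               radeon_fence_signaled(fence), timeout);
        if (!radeon_fence_signaled(fence)) {
                if (r) {                        /* woke early: some fence progressed */
                        timeout = r;            /* wait out the rest of the slice */
                        goto retry;
                }
                if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev))
                        radeon_gpu_reset(rdev); /* no progress for a whole slice */
                timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
                goto retry;                     /* otherwise just keep waiting */
        }
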
@@ -332,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev) INIT_LIST_HEAD(&rdev->fence_drv.created); INIT_LIST_HEAD(&rdev->fence_drv.emited); INIT_LIST_HEAD(&rdev->fence_drv.signaled); - rdev->fence_drv.count_timeout = 0; init_waitqueue_head(&rdev->fence_drv.queue); rdev->fence_drv.initialized = true; write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 1770d3c07fd0..e65b90317fab 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, int i, j; if (!rdev->gart.ready) { - DRM_ERROR("trying to bind memory to unitialized GART !\n"); + WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } t = offset / RADEON_GPU_PAGE_SIZE; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index ef92d147d8f0..a72a3ee5d69b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) if (robj) { radeon_bo_unref(&robj); } + + drm_gem_object_release(gobj); + kfree(gobj); } int radeon_gem_object_create(struct radeon_device *rdev, int size, @@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, args->vram_visible = rdev->mc.real_vram_size; if (rdev->stollen_vga_memory) args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); - if (rdev->fbdev_rbo) - args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo); + args->vram_visible -= radeon_fbdev_total_size(rdev); args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - RADEON_IB_POOL_SIZE*64*1024; return 0; diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 4ae50c19589f..5def6f5dff38 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -59,6 +59,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) return false; } +/* bit banging i2c */ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) { @@ -181,13 +182,30 @@ static void set_data(void *i2c_priv, int data) WREG32(rec->en_data_reg, val); } +static int pre_xfer(struct i2c_adapter *i2c_adap) +{ + struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); + + radeon_i2c_do_lock(i2c, 1); + + return 0; +} + +static void post_xfer(struct i2c_adapter *i2c_adap) +{ + struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); + + radeon_i2c_do_lock(i2c, 0); +} + +/* hw i2c */ + static u32 
radeon_get_i2c_prescale(struct radeon_device *rdev) { - struct radeon_pll *spll = &rdev->clock.spll; u32 sclk = radeon_get_engine_clock(rdev); u32 prescale = 0; - u32 n, m; - u8 loop; + u32 nm; + u8 n, m, loop; int i2c_clock; switch (rdev->family) { @@ -203,13 +221,15 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) case CHIP_R300: case CHIP_R350: case CHIP_RV350: - n = (spll->reference_freq) / (4 * 6); + i2c_clock = 60; + nm = (sclk * 10) / (i2c_clock * 4); for (loop = 1; loop < 255; loop++) { - if ((loop * (loop - 1)) > n) + if ((nm / loop) < loop) break; } - m = loop - 1; - prescale = m | (loop << 8); + n = loop - 1; + m = loop - 2; + prescale = m | (n << 8); break; case CHIP_RV380: case CHIP_RS400: @@ -217,7 +237,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) case CHIP_R420: case CHIP_R423: case CHIP_RV410: - sclk = radeon_get_engine_clock(rdev); prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; break; case CHIP_RS600: @@ -232,7 +251,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) case CHIP_RV570: case CHIP_R580: i2c_clock = 50; - sclk = radeon_get_engine_clock(rdev); if (rdev->family == CHIP_R520) prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); else @@ -291,6 +309,7 @@ static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap, prescale = radeon_get_i2c_prescale(rdev); reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | + RADEON_I2C_DRIVE_EN | RADEON_I2C_START | RADEON_I2C_STOP | RADEON_I2C_GO); @@ -757,26 +776,13 @@ done: return ret; } -static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap, +static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); - int ret; - - radeon_i2c_do_lock(i2c, 1); - ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num); - radeon_i2c_do_lock(i2c, 0); - - return ret; -} - -static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, - struct i2c_msg *msgs, int num) -{ - struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); struct radeon_device *rdev = i2c->dev->dev_private; struct radeon_i2c_bus_rec *rec = &i2c->rec; - int ret; + int ret = 0; switch (rdev->family) { case CHIP_R100: @@ -797,16 +803,12 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, case CHIP_RV410: case CHIP_RS400: case CHIP_RS480: - if (rec->hw_capable) - ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); - else - ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); break; case CHIP_RS600: case CHIP_RS690: case CHIP_RS740: /* XXX fill in hw i2c implementation */ - ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); break; case CHIP_RV515: case CHIP_R520: @@ -814,20 +816,16 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, case CHIP_RV560: case CHIP_RV570: case CHIP_R580: - if (rec->hw_capable) { - if (rec->mm_i2c) - ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); - else - ret = r500_hw_i2c_xfer(i2c_adap, msgs, num); - } else - ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + if (rec->mm_i2c) + ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); + else + ret = r500_hw_i2c_xfer(i2c_adap, msgs, num); break; case CHIP_R600: case CHIP_RV610: case CHIP_RV630: case CHIP_RV670: /* XXX fill in hw i2c implementation */ - ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); break; case CHIP_RV620: case CHIP_RV635: @@ -838,7 +836,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, case CHIP_RV710: case CHIP_RV740: /* XXX fill in hw i2c implementation */ - ret = 
radeon_sw_i2c_xfer(i2c_adap, msgs, num); break; case CHIP_CEDAR: case CHIP_REDWOOD: @@ -846,7 +843,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, case CHIP_CYPRESS: case CHIP_HEMLOCK: /* XXX fill in hw i2c implementation */ - ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); break; default: DRM_ERROR("i2c: unhandled radeon chip\n"); @@ -857,20 +853,21 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, return ret; } -static u32 radeon_i2c_func(struct i2c_adapter *adap) +static u32 radeon_hw_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm radeon_i2c_algo = { - .master_xfer = radeon_i2c_xfer, - .functionality = radeon_i2c_func, + .master_xfer = radeon_hw_i2c_xfer, + .functionality = radeon_hw_i2c_func, }; struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_bus_rec *rec, const char *name) { + struct radeon_device *rdev = dev->dev_private; struct radeon_i2c_chan *i2c; int ret; @@ -878,37 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, if (i2c == NULL) return NULL; - /* set the internal bit adapter */ - i2c->algo.radeon.bit_adapter.owner = THIS_MODULE; - i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c); - sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name); - i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data; - i2c->algo.radeon.bit_data.setsda = set_data; - i2c->algo.radeon.bit_data.setscl = set_clock; - i2c->algo.radeon.bit_data.getsda = get_data; - i2c->algo.radeon.bit_data.getscl = get_clock; - i2c->algo.radeon.bit_data.udelay = 20; - /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always - * make this, 2 jiffies is a lot more reliable */ - i2c->algo.radeon.bit_data.timeout = 2; - i2c->algo.radeon.bit_data.data = i2c; - ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter); - if (ret) { - DRM_ERROR("Failed to register internal bit i2c %s\n", name); - goto out_free; - } - /* set the radeon i2c adapter */ - i2c->dev = dev; i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; + i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); - sprintf(i2c->adapter.name, "Radeon i2c %s", name); - i2c->adapter.algo_data = &i2c->algo.radeon; - i2c->adapter.algo = &radeon_i2c_algo; - ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register i2c %s\n", name); - goto out_free; + if (rec->mm_i2c || + (rec->hw_capable && + radeon_hw_i2c && + ((rdev->family <= CHIP_RS480) || + ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { + /* set the radeon hw i2c adapter */ + sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); + i2c->adapter.algo = &radeon_i2c_algo; + ret = i2c_add_adapter(&i2c->adapter); + if (ret) { + DRM_ERROR("Failed to register hw i2c %s\n", name); + goto out_free; + } + } else { + /* set the radeon bit adapter */ + sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); + i2c->adapter.algo_data = &i2c->algo.bit; + i2c->algo.bit.pre_xfer = pre_xfer; + i2c->algo.bit.post_xfer = post_xfer; + i2c->algo.bit.setsda = set_data; + i2c->algo.bit.setscl = set_clock; + i2c->algo.bit.getsda = get_data; + i2c->algo.bit.getscl = get_clock; + i2c->algo.bit.udelay = 20; + /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always + * make this, 2 jiffies is a lot more reliable */ + i2c->algo.bit.timeout = 2; + i2c->algo.bit.data = i2c; + ret = i2c_bit_add_bus(&i2c->adapter); + if (ret) { + DRM_ERROR("Failed to register bit i2c %s\n", name); + goto out_free; + } } 
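
The adapter selection above is one compound condition; restated as a predicate (hypothetical helper name, logic copied from the hunk):

        static bool radeon_use_hw_i2c(struct radeon_device *rdev,
                                      struct radeon_i2c_bus_rec *rec)
        {
                if (rec->mm_i2c)                        /* mm i2c only has a hw path */
                        return true;
                if (!rec->hw_capable || !radeon_hw_i2c) /* needs the hw_i2c=1 opt-in */
                        return false;
                /* hw xfer is only implemented for r1xx-rs480 and rv515-r580 */
                return (rdev->family <= CHIP_RS480) ||
                       ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580));
        }

All other buses fall back to the i2c-algo-bit template, with pre_xfer/post_xfer taking and releasing the bus via radeon_i2c_do_lock().
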
return i2c; @@ -953,16 +956,6 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) { if (!i2c) return; - i2c_del_adapter(&i2c->algo.radeon.bit_adapter); - i2c_del_adapter(&i2c->adapter); - kfree(i2c); -} - -void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c) -{ - if (!i2c) - return; - i2c_del_adapter(&i2c->adapter); kfree(i2c); } diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 3cfd60fd0083..059bfa4098d7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -26,6 +26,7 @@ * Jerome Glisse */ #include "drmP.h" +#include "drm_crtc_helper.h" #include "radeon_drm.h" #include "radeon_reg.h" #include "radeon.h" @@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work) radeon_connector_hotplug(connector); } /* Just fire off a uevent and let userspace tell us what to do */ - drm_sysfs_hotplug_event(dev); + drm_helper_hpd_irq_event(dev); } void radeon_driver_irq_preinstall_kms(struct drm_device *dev) @@ -67,9 +68,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) /* Disable *all* interrupts */ rdev->irq.sw_int = false; - for (i = 0; i < 2; i++) { + rdev->irq.gui_idle = false; + for (i = 0; i < rdev->num_crtc; i++) rdev->irq.crtc_vblank_int[i] = false; - } + for (i = 0; i < 6; i++) + rdev->irq.hpd[i] = false; radeon_irq_set(rdev); /* Clear bits */ radeon_irq_process(rdev); @@ -95,34 +98,30 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) } /* Disable *all* interrupts */ rdev->irq.sw_int = false; - for (i = 0; i < 2; i++) { + rdev->irq.gui_idle = false; + for (i = 0; i < rdev->num_crtc; i++) rdev->irq.crtc_vblank_int[i] = false; + for (i = 0; i < 6; i++) rdev->irq.hpd[i] = false; - } radeon_irq_set(rdev); } int radeon_irq_kms_init(struct radeon_device *rdev) { int r = 0; - int num_crtc = 2; - if (rdev->flags & RADEON_SINGLE_CRTC) - num_crtc = 1; spin_lock_init(&rdev->irq.sw_lock); - r = drm_vblank_init(rdev->ddev, num_crtc); + r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { return r; } /* enable msi */ rdev->msi_enabled = 0; - /* MSIs don't seem to work on my rs780; - * not sure about rs880 or other rs780s. - * Needs more investigation. + /* MSIs don't seem to work reliably on all IGP + * chips. Disable MSI on them for now. 
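
The MSI policy being introduced here, as a predicate (hypothetical helper; the condition is the one in the hunk, and pci_enable_msi() returns 0 on success):

        static bool radeon_msi_ok(struct radeon_device *rdev)
        {
                /* RV380 and newer support MSI, but skip all IGPs for now */
                return (rdev->family >= CHIP_RV380) &&
                       !(rdev->flags & RADEON_IS_IGP);
        }

        /* usage in radeon_irq_kms_init(): */
        if (radeon_msi_ok(rdev) && !pci_enable_msi(rdev->pdev))
                rdev->msi_enabled = 1;
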
*/ if ((rdev->family >= CHIP_RV380) && - (rdev->family != CHIP_RS780) && - (rdev->family != CHIP_RS880)) { + (!(rdev->flags & RADEON_IS_IGP))) { int ret = pci_enable_msi(rdev->pdev); if (!ret) { rdev->msi_enabled = 1; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 20ec276e7596..ab389f89fa8d 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -31,6 +31,7 @@ #include "radeon_drm.h" #include <linux/vga_switcheroo.h> +#include <linux/slab.h> int radeon_driver_unload_kms(struct drm_device *dev) { @@ -97,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_info *info; + struct radeon_mode_info *minfo = &rdev->mode_info; uint32_t *value_ptr; uint32_t value; + struct drm_crtc *crtc; + int i, found; info = data; value_ptr = (uint32_t *)((unsigned long)info->value); + value = *value_ptr; switch (info->request) { case RADEON_INFO_DEVICE_ID: value = dev->pci_device; @@ -113,6 +118,28 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) value = rdev->num_z_pipes; break; case RADEON_INFO_ACCEL_WORKING: + /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ + if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) + value = false; + else + value = rdev->accel_working; + break; + case RADEON_INFO_CRTC_FROM_ID: + for (i = 0, found = 0; i < rdev->num_crtc; i++) { + crtc = (struct drm_crtc *)minfo->crtcs[i]; + if (crtc && crtc->base.id == value) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + value = radeon_crtc->crtc_id; + found = 1; + break; + } + } + if (!found) { + DRM_DEBUG("unknown crtc id %d\n", value); + return -EINVAL; + } + break; + case RADEON_INFO_ACCEL_WORKING2: value = rdev->accel_working; break; default: @@ -164,7 +191,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; - if (crtc < 0 || crtc > 1) { + if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } @@ -176,7 +203,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; - if (crtc < 0 || crtc > 1) { + if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } @@ -190,7 +217,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; - if (crtc < 0 || crtc > 1) { + if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return; } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index df23d6a01d02..e1e5255396ac 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -26,7 +26,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> -#include "radeon_fixed.h" +#include <drm/drm_fixed.h> #include "radeon.h" #include "atom.h" @@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: + radeon_crtc->enabled = true; + /* adjust pm to dpms changes BEFORE enabling crtcs */ + radeon_pm_compute_clocks(rdev); if (radeon_crtc->crtc_id) WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); else { @@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) 
RADEON_CRTC_DISP_REQ_EN_B)); WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); } + radeon_crtc->enabled = false; + /* adjust pm to dpms changes AFTER disabling crtcs */ + radeon_pm_compute_clocks(rdev); break; } } @@ -603,6 +609,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod ? RADEON_CRTC2_INTERLACE_EN : 0)); + /* rs4xx chips seem to like to have the crtc enabled when the timing is set */ + if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) + crtc2_gen_cntl |= RADEON_CRTC2_EN; + disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; @@ -630,6 +640,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod ? RADEON_CRTC_INTERLACE_EN : 0)); + /* rs4xx chips seem to like to have the crtc enabled when the timing is set */ + if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) + crtc_gen_cntl |= RADEON_CRTC_EN; + crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | RADEON_CRTC_VSYNC_DIS | @@ -958,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + + /* adjust pm to upcoming mode change */ + radeon_pm_compute_clocks(rdev); + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; return true; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index cf389ce50a8a..5688a0cf6bbe 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) udelay(panel_pwr_delay * 1000); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); + udelay(panel_pwr_delay * 1000); break; } @@ -116,8 +117,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); - /* adjust pm to dpms change */ - radeon_pm_compute_clocks(rdev); } static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) @@ -217,27 +216,14 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - - /* adjust pm to upcoming mode change */ - radeon_pm_compute_clocks(rdev); /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); /* get the native mode for LVDS */ - if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { - struct drm_display_mode *native_mode = &radeon_encoder->native_mode; - int mode_id = adjusted_mode->base.id; - *adjusted_mode = *native_mode; - adjusted_mode->hdisplay = mode->hdisplay; - adjusted_mode->vdisplay = mode->vdisplay; - adjusted_mode->crtc_hdisplay = mode->hdisplay; - adjusted_mode->crtc_vdisplay = mode->vdisplay; - adjusted_mode->base.id = mode_id; - } + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) + radeon_panel_mode_fixup(encoder, adjusted_mode); return true; } @@ -294,8 +280,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - /* adjust pm to dpms change */ - radeon_pm_compute_clocks(rdev); } static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) @@ -482,8 +466,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - /* adjust pm to dpms change */ - radeon_pm_compute_clocks(rdev); } static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) @@ -650,8 +632,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - /* adjust pm to dpms change */ - radeon_pm_compute_clocks(rdev); } static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) @@ -830,8 +810,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; if (rdev->family == CHIP_R420 || - rdev->family == CHIP_R423 || - rdev->family == CHIP_RV410) + rdev->family == CHIP_R423 || + rdev->family == CHIP_RV410) tv_dac_cntl |= (R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | @@ -860,8 +840,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); - /* adjust pm to dpms change */ - radeon_pm_compute_clocks(rdev); } static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) @@ -907,35 +885,43 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, if (rdev->family != CHIP_R200) { tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); if (rdev->family == CHIP_R420 || - rdev->family == CHIP_R423 || - rdev->family == CHIP_RV410) { + rdev->family == CHIP_R423 || + rdev->family == CHIP_RV410) { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | - RADEON_TV_DAC_BGADJ_MASK | - R420_TV_DAC_DACADJ_MASK | - R420_TV_DAC_RDACPD | - R420_TV_DAC_GDACPD | - R420_TV_DAC_BDACPD | - R420_TV_DAC_TVENABLE); + RADEON_TV_DAC_BGADJ_MASK | + R420_TV_DAC_DACADJ_MASK | + R420_TV_DAC_RDACPD | + R420_TV_DAC_GDACPD | + R420_TV_DAC_BDACPD | + R420_TV_DAC_TVENABLE); } else { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | - RADEON_TV_DAC_BGADJ_MASK | - RADEON_TV_DAC_DACADJ_MASK | - RADEON_TV_DAC_RDACPD | - RADEON_TV_DAC_GDACPD | - RADEON_TV_DAC_BDACPD); + RADEON_TV_DAC_BGADJ_MASK | + RADEON_TV_DAC_DACADJ_MASK | + RADEON_TV_DAC_RDACPD | + RADEON_TV_DAC_GDACPD | + RADEON_TV_DAC_BDACPD); } - /* FIXME TV */ - if (tv_dac) { - struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; - tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | - RADEON_TV_DAC_NHOLD | - RADEON_TV_DAC_STD_PS2 | - tv_dac->ps2_tvdac_adj); + tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD; + + if (is_tv) { + if (tv_dac->tv_std == TV_STD_NTSC || + tv_dac->tv_std == TV_STD_NTSC_J || + tv_dac->tv_std == TV_STD_PAL_M || + tv_dac->tv_std == TV_STD_PAL_60) + tv_dac_cntl |= tv_dac->ntsc_tvdac_adj; + else + tv_dac_cntl |= tv_dac->pal_tvdac_adj; + + if (tv_dac->tv_std == TV_STD_NTSC || + tv_dac->tv_std == TV_STD_NTSC_J) + tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC; + else + tv_dac_cntl |= RADEON_TV_DAC_STD_PAL; } else - tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | - RADEON_TV_DAC_NHOLD | - RADEON_TV_DAC_STD_PS2); + tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 | + tv_dac->ps2_tvdac_adj); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } @@ -943,16 +929,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, if (ASIC_IS_R300(rdev)) { gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); - } - - if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) - disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); - else + } else if (rdev->family != CHIP_R200) disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); - - if (rdev->family == CHIP_R200) + else if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); + if (rdev->family >= CHIP_R200) + disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); + if (is_tv) { uint32_t dac_cntl; @@ -1017,15 +1001,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, if (ASIC_IS_R300(rdev)) { WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); - } + } else if (rdev->family != CHIP_R200) + WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); + else if (rdev->family == CHIP_R200) + WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->family >= CHIP_R200) WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); - else - WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); - - if (rdev->family == CHIP_R200) - WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (is_tv) radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); @@ -1183,6 +1165,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 
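/* Presumably why the crtc2 check below exists: legacy TV DAC load
 * detection needs to drive CRTC2, so while another encoder has an
 * enabled CRTC2 the probe cannot run safely and the connector is
 * reported disconnected instead. */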
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; bool color = true; + struct drm_crtc *crtc; + + /* find out if crtc2 is in use or if this encoder is using it */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { + if (encoder->crtc != crtc) { + return connector_status_disconnected; + } + } + } if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || connector->connector_type == DRM_MODE_CONNECTOR_Composite || diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 417684daef4c..032040397743 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c @@ -57,6 +57,10 @@ #define NTSC_TV_PLL_N_14 693 #define NTSC_TV_PLL_P_14 7 +#define PAL_TV_PLL_M_14 19 +#define PAL_TV_PLL_N_14 353 +#define PAL_TV_PLL_P_14 5 + #define VERT_LEAD_IN_LINES 2 #define FRAC_BITS 0xe #define FRAC_MASK 0x3fff @@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = { 630627, /* defRestart */ 347, /* crtcPLL_N */ 14, /* crtcPLL_M */ - 8, /* crtcPLL_postDiv */ + 8, /* crtcPLL_postDiv */ 1022, /* pixToTV */ }, + { /* PAL timing for 14 Mhz ref clk */ + 800, /* horResolution */ + 600, /* verResolution */ + TV_STD_PAL, /* standard */ + 1131, /* horTotal */ + 742, /* verTotal */ + 813, /* horStart */ + 840, /* horSyncStart */ + 633, /* verSyncStart */ + 708369, /* defRestart */ + 211, /* crtcPLL_N */ + 9, /* crtcPLL_M */ + 8, /* crtcPLL_postDiv */ + 759, /* pixToTV */ + }, }; #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) @@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru if (pll->reference_freq == 2700) const_ptr = &available_tv_modes[1]; else - const_ptr = &available_tv_modes[1]; /* FIX ME */ + const_ptr = &available_tv_modes[3]; } return const_ptr; } @@ -623,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, } flicker_removal = (tmp + 500) / 1000; - if (flicker_removal < 2) - flicker_removal = 2; + if (flicker_removal < 3) + flicker_removal = 3; for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { if (flicker_removal == SLOPE_limit[i]) break; @@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, n = PAL_TV_PLL_N_27; p = PAL_TV_PLL_P_27; } else { - m = PAL_TV_PLL_M_27; - n = PAL_TV_PLL_N_27; - p = PAL_TV_PLL_P_27; + m = PAL_TV_PLL_M_14; + n = PAL_TV_PLL_N_14; + p = PAL_TV_PLL_P_14; } } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 1702b820aa4d..95696aa57ac8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -34,11 +34,12 @@ #include <drm_mode.h> #include <drm_edid.h> #include <drm_dp_helper.h> +#include <drm_fixed.h> #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/i2c-algo-bit.h> -#include "radeon_fixed.h" +struct radeon_bo; struct radeon_device; #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) @@ -65,6 +66,16 @@ enum radeon_tv_std { TV_STD_PAL_N, }; +enum radeon_hpd_id { + RADEON_HPD_1 = 0, + RADEON_HPD_2, + RADEON_HPD_3, + RADEON_HPD_4, + RADEON_HPD_5, + RADEON_HPD_6, + RADEON_HPD_NONE = 0xff, +}; + /* radeon gpio-based i2c * 1. 
"mask" reg and bits * grabs the gpio pins for software use @@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec { /* id used by atom */ uint8_t i2c_id; /* id used by atom */ - uint8_t hpd_id; + enum radeon_hpd_id hpd; /* can be used with hw i2c engine */ bool hw_capable; /* uses multi-media i2c engine */ @@ -129,6 +140,7 @@ struct radeon_tmds_pll { #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) #define RADEON_PLL_USE_POST_DIV (1 << 12) +#define RADEON_PLL_IS_LCD (1 << 13) /* pll algo */ enum radeon_pll_algo { @@ -149,6 +161,8 @@ struct radeon_pll { uint32_t pll_in_max; uint32_t pll_out_min; uint32_t pll_out_max; + uint32_t lcd_pll_out_min; + uint32_t lcd_pll_out_max; uint32_t best_vco; /* divider limits */ @@ -170,17 +184,12 @@ struct radeon_pll { enum radeon_pll_algo algo; }; -struct i2c_algo_radeon_data { - struct i2c_adapter bit_adapter; - struct i2c_algo_bit_data bit_data; -}; - struct radeon_i2c_chan { struct i2c_adapter adapter; struct drm_device *dev; union { + struct i2c_algo_bit_data bit; struct i2c_algo_dp_aux_data dp; - struct i2c_algo_radeon_data radeon; } algo; struct radeon_i2c_bus_rec rec; }; @@ -197,6 +206,7 @@ enum radeon_connector_table { CT_MINI_INTERNAL, CT_IMAC_G5_ISIGHT, CT_EMAC, + CT_RN50_POWER, }; enum radeon_dvo_chip { @@ -204,6 +214,8 @@ enum radeon_dvo_chip { DVO_SIL1178, }; +struct radeon_fbdev; + struct radeon_mode_info { struct atom_context *atom_context; struct card_info *atom_card_info; @@ -220,6 +232,9 @@ struct radeon_mode_info { struct drm_property *tmds_pll_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; + + /* pointer to fbdev info structure */ + struct radeon_fbdev *rfbdev; }; #define MAX_H_CODE_TIMING_LEN 32 @@ -341,7 +356,9 @@ struct radeon_encoder { enum radeon_rmx_type rmx_type; struct drm_display_mode native_mode; void *enc_priv; + int audio_polling_active; int hdmi_offset; + int hdmi_config_offset; int hdmi_audio_workaround; int hdmi_buffer_status; }; @@ -364,16 +381,6 @@ struct radeon_gpio_rec { u32 mask; }; -enum radeon_hpd_id { - RADEON_HPD_NONE = 0, - RADEON_HPD_1, - RADEON_HPD_2, - RADEON_HPD_3, - RADEON_HPD_4, - RADEON_HPD_5, - RADEON_HPD_6, -}; - struct radeon_hpd { enum radeon_hpd_id hpd; u8 plugged_state; @@ -431,7 +438,6 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_bus_rec *rec, const char *name); extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); -extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c); extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, u8 slave_addr, u8 addr, @@ -534,11 +540,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); -struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj); - -int radeonfb_probe(struct drm_device *dev); +void radeon_framebuffer_init(struct drm_device *dev, + struct radeon_framebuffer *rfb, + struct drm_mode_fb_cmd *mode_cmd, + struct drm_gem_object *obj); int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev); @@ -560,6 +565,8 @@ extern int radeon_static_clocks_init(struct drm_device *dev); bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode 
*adjusted_mode); +void radeon_panel_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode); void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc); /* legacy tv */ @@ -575,4 +582,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder, void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + +/* fbdev layer */ +int radeon_fbdev_init(struct radeon_device *rdev); +void radeon_fbdev_fini(struct radeon_device *rdev); +void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); +int radeon_fbdev_total_size(struct radeon_device *rdev); +bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); + +void radeon_fb_output_poll_changed(struct radeon_device *rdev); #endif diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index fc9d00ac6b15..d5b9373ce06c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -30,6 +30,7 @@ * Dave Airlie */ #include <linux/list.h> +#include <linux/slab.h> #include <drm/drmP.h> #include "radeon_drm.h" #include "radeon.h" @@ -111,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ + mutex_lock(&rdev->vram_mutex); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, &bo->placement, 0, 0, !kernel, NULL, size, &radeon_ttm_bo_destroy); + mutex_unlock(&rdev->vram_mutex); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(rdev->dev, @@ -165,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo) void radeon_bo_unref(struct radeon_bo **bo) { struct ttm_buffer_object *tbo; + struct radeon_device *rdev; if ((*bo) == NULL) return; + rdev = (*bo)->rdev; tbo = &((*bo)->tbo); + mutex_lock(&rdev->vram_mutex); ttm_bo_unref(&tbo); + mutex_unlock(&rdev->vram_mutex); if (tbo == NULL) *bo = NULL; } @@ -185,11 +192,13 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) return 0; } radeon_ttm_placement_from_domain(bo, domain); - /* force to pin into visible video ram */ - bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; + if (domain == RADEON_GEM_DOMAIN_VRAM) { + /* force to pin into visible video ram */ + bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; + } for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); if (likely(r == 0)) { bo->pin_count = 1; if (gpu_addr != NULL) @@ -213,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); if (unlikely(r != 0)) dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); return r; @@ -292,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head) r = radeon_bo_reserve(lobj->bo, false); if (unlikely(r != 0)) return r; + lobj->reserved = true; } return 0; } @@ -302,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head) list_for_each_entry(lobj, head, list) { /* only unreserve object we successfully reserved */ - if (radeon_bo_is_reserved(lobj->bo)) + if (lobj->reserved && 
radeon_bo_is_reserved(lobj->bo)) radeon_bo_unreserve(lobj->bo); } } @@ -313,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head) struct radeon_bo *bo; int r; + list_for_each_entry(lobj, head, list) { + lobj->reserved = false; + } r = radeon_bo_list_reserve(head); if (unlikely(r != 0)) { return r; @@ -328,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head) lobj->rdomain); } r = ttm_bo_validate(&bo->tbo, &bo->placement, - true, false); + true, false, false); if (unlikely(r)) return r; } @@ -496,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo, radeon_bo_check_tiling(rbo, 0, 1); } -void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) +int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { + struct radeon_device *rdev; struct radeon_bo *rbo; + unsigned long offset, size; + int r; + if (!radeon_ttm_bo_is_radeon_bo(bo)) - return; + return 0; rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); + rdev = rbo->rdev; + if (bo->mem.mem_type == TTM_PL_VRAM) { + size = bo->mem.num_pages << PAGE_SHIFT; + offset = bo->mem.mm_node->start << PAGE_SHIFT; + if ((offset + size) > rdev->mc.visible_vram_size) { + /* hurrah the memory is not visible ! */ + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + r = ttm_bo_validate(bo, &rbo->placement, false, true, false); + if (unlikely(r != 0)) + return r; + offset = bo->mem.mm_node->start << PAGE_SHIFT; + /* this should not happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return -EINVAL; + } + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 7ab43de1e244..353998dc2c03 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop); extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); -extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); +extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); #endif diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index d4d1c39a0e99..3fa6984d9896 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -23,24 +23,17 @@ #include "drmP.h" #include "radeon.h" #include "avivod.h" +#ifdef CONFIG_ACPI +#include <linux/acpi.h> +#endif +#include <linux/power_supply.h> #define RADEON_IDLE_LOOP_MS 100 #define RADEON_RECLOCK_DELAY_MS 200 #define RADEON_WAIT_VBLANK_TIMEOUT 200 +#define RADEON_WAIT_IDLE_TIMEOUT 200 -static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); -static void radeon_pm_set_clocks(struct radeon_device *rdev); -static void radeon_pm_idle_work_handler(struct work_struct *work); -static int radeon_debugfs_pm_init(struct radeon_device *rdev); - -static const char *pm_state_names[4] = { - "PM_STATE_DISABLED", - "PM_STATE_MINIMUM", - "PM_STATE_PAUSED", - "PM_STATE_ACTIVE" -}; - -static const char *pm_state_types[5] = { +static const char *radeon_pm_state_type_name[5] = { "Default", "Powersave", "Battery", @@ -48,336 +41,669 @@ static const char *pm_state_types[5] = { "Performance", }; -static void radeon_print_power_mode_info(struct radeon_device *rdev) +static void radeon_dynpm_idle_work_handler(struct work_struct 
*work); +static int radeon_debugfs_pm_init(struct radeon_device *rdev); +static bool radeon_pm_in_vbl(struct radeon_device *rdev); +static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); +static void radeon_pm_update_profile(struct radeon_device *rdev); +static void radeon_pm_set_clocks(struct radeon_device *rdev); + +#define ACPI_AC_CLASS "ac_adapter" + +#ifdef CONFIG_ACPI +static int radeon_acpi_event(struct notifier_block *nb, + unsigned long val, + void *data) { - int i, j; - bool is_default; + struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); + struct acpi_bus_event *entry = (struct acpi_bus_event *)data; - DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); - for (i = 0; i < rdev->pm.num_power_states; i++) { - if (rdev->pm.default_power_state == &rdev->pm.power_state[i]) - is_default = true; + if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { + if (power_supply_is_system_supplied() > 0) + DRM_DEBUG("pm: AC\n"); else - is_default = false; - DRM_INFO("State %d %s %s\n", i, - pm_state_types[rdev->pm.power_state[i].type], - is_default ? "(default)" : ""); - if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) - DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes); - DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); - for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { - if (rdev->flags & RADEON_IS_IGP) - DRM_INFO("\t\t%d engine: %d\n", - j, - rdev->pm.power_state[i].clock_info[j].sclk * 10); - else - DRM_INFO("\t\t%d engine/memory: %d/%d\n", - j, - rdev->pm.power_state[i].clock_info[j].sclk * 10, - rdev->pm.power_state[i].clock_info[j].mclk * 10); + DRM_DEBUG("pm: DC\n"); + + if (rdev->pm.pm_method == PM_METHOD_PROFILE) { + if (rdev->pm.profile == PM_PROFILE_AUTO) { + mutex_lock(&rdev->pm.mutex); + radeon_pm_update_profile(rdev); + radeon_pm_set_clocks(rdev); + mutex_unlock(&rdev->pm.mutex); + } } } + + return NOTIFY_OK; } +#endif -static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev, - enum radeon_pm_state_type type) +static void radeon_pm_update_profile(struct radeon_device *rdev) { - int i, j; - enum radeon_pm_state_type wanted_types[2]; - int wanted_count; - - switch (type) { - case POWER_STATE_TYPE_DEFAULT: - default: - return rdev->pm.default_power_state; - case POWER_STATE_TYPE_POWERSAVE: - if (rdev->flags & RADEON_IS_MOBILITY) { - wanted_types[0] = POWER_STATE_TYPE_POWERSAVE; - wanted_types[1] = POWER_STATE_TYPE_BATTERY; - wanted_count = 2; - } else { - wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; - wanted_count = 1; - } + switch (rdev->pm.profile) { + case PM_PROFILE_DEFAULT: + rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; break; - case POWER_STATE_TYPE_BATTERY: - if (rdev->flags & RADEON_IS_MOBILITY) { - wanted_types[0] = POWER_STATE_TYPE_BATTERY; - wanted_types[1] = POWER_STATE_TYPE_POWERSAVE; - wanted_count = 2; + case PM_PROFILE_AUTO: + if (power_supply_is_system_supplied() > 0) { + if (rdev->pm.active_crtc_count > 1) + rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; + else + rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; } else { - wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; - wanted_count = 1; + if (rdev->pm.active_crtc_count > 1) + rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; + else + rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; } break; - case POWER_STATE_TYPE_BALANCED: - case POWER_STATE_TYPE_PERFORMANCE: - wanted_types[0] = type; - wanted_count = 1; + case 
PM_PROFILE_LOW: + if (rdev->pm.active_crtc_count > 1) + rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; + else + rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; + break; + case PM_PROFILE_MID: + if (rdev->pm.active_crtc_count > 1) + rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; + else + rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; break; + case PM_PROFILE_HIGH: + if (rdev->pm.active_crtc_count > 1) + rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; + else + rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; + break; + } + + if (rdev->pm.active_crtc_count == 0) { + rdev->pm.requested_power_state_index = + rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; + rdev->pm.requested_clock_mode_index = + rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; + } else { + rdev->pm.requested_power_state_index = + rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; + rdev->pm.requested_clock_mode_index = + rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; + } +} + +static void radeon_unmap_vram_bos(struct radeon_device *rdev) +{ + struct radeon_bo *bo, *n; + + if (list_empty(&rdev->gem.objects)) + return; + + list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) + ttm_bo_unmap_virtual(&bo->tbo); + } +} + +static void radeon_sync_with_vblank(struct radeon_device *rdev) +{ + if (rdev->pm.active_crtcs) { + rdev->pm.vblank_sync = false; + wait_event_timeout( + rdev->irq.vblank_queue, rdev->pm.vblank_sync, + msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); + } +} + +static void radeon_set_power_state(struct radeon_device *rdev) +{ + u32 sclk, mclk; + bool misc_after = false; + + if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && + (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) + return; + + if (radeon_gui_idle(rdev)) { + sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. + clock_info[rdev->pm.requested_clock_mode_index].sclk; + if (sclk > rdev->clock.default_sclk) + sclk = rdev->clock.default_sclk; + + mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
+ clock_info[rdev->pm.requested_clock_mode_index].mclk; + if (mclk > rdev->clock.default_mclk) + mclk = rdev->clock.default_mclk; + + /* upvolt before raising clocks, downvolt after lowering clocks */ + if (sclk < rdev->pm.current_sclk) + misc_after = true; + + radeon_sync_with_vblank(rdev); + + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { + if (!radeon_pm_in_vbl(rdev)) + return; + } + + radeon_pm_prepare(rdev); + + if (!misc_after) + /* voltage, pcie lanes, etc.*/ + radeon_pm_misc(rdev); + + /* set engine clock */ + if (sclk != rdev->pm.current_sclk) { + radeon_pm_debug_check_in_vbl(rdev, false); + radeon_set_engine_clock(rdev, sclk); + radeon_pm_debug_check_in_vbl(rdev, true); + rdev->pm.current_sclk = sclk; + DRM_DEBUG("Setting: e: %d\n", sclk); + } + + /* set memory clock */ + if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { + radeon_pm_debug_check_in_vbl(rdev, false); + radeon_set_memory_clock(rdev, mclk); + radeon_pm_debug_check_in_vbl(rdev, true); + rdev->pm.current_mclk = mclk; + DRM_DEBUG("Setting: m: %d\n", mclk); + } + + if (misc_after) + /* voltage, pcie lanes, etc.*/ + radeon_pm_misc(rdev); + + radeon_pm_finish(rdev); + + rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; + rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; + } else + DRM_DEBUG("pm: GUI not idle!!!\n"); +} + +static void radeon_pm_set_clocks(struct radeon_device *rdev) +{ + int i; + + mutex_lock(&rdev->ddev->struct_mutex); + mutex_lock(&rdev->vram_mutex); + mutex_lock(&rdev->cp.mutex); + + /* gui idle int has issues on older chips it seems */ + if (rdev->family >= CHIP_R600) { + if (rdev->irq.installed) { + /* wait for GPU idle */ + rdev->pm.gui_idle = false; + rdev->irq.gui_idle = true; + radeon_irq_set(rdev); + wait_event_interruptible_timeout( + rdev->irq.idle_queue, rdev->pm.gui_idle, + msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); + rdev->irq.gui_idle = false; + radeon_irq_set(rdev); + } + } else { + if (rdev->cp.ready) { + struct radeon_fence *fence; + radeon_ring_alloc(rdev, 64); + radeon_fence_create(rdev, &fence); + radeon_fence_emit(rdev, fence); + radeon_ring_commit(rdev); + radeon_fence_wait(fence, false); + radeon_fence_unref(&fence); + } + } + radeon_unmap_vram_bos(rdev); + + if (rdev->irq.installed) { + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->pm.active_crtcs & (1 << i)) { + rdev->pm.req_vblank |= (1 << i); + drm_vblank_get(rdev->ddev, i); + } + } } - for (i = 0; i < wanted_count; i++) { - for (j = 0; j < rdev->pm.num_power_states; j++) { - if (rdev->pm.power_state[j].type == wanted_types[i]) - return &rdev->pm.power_state[j]; + radeon_set_power_state(rdev); + + if (rdev->irq.installed) { + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->pm.req_vblank & (1 << i)) { + rdev->pm.req_vblank &= ~(1 << i); + drm_vblank_put(rdev->ddev, i); + } } } - return rdev->pm.default_power_state; + /* update display watermarks based on new power state */ + radeon_update_bandwidth_info(rdev); + if (rdev->pm.active_crtc_count) + radeon_bandwidth_update(rdev); + + rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; + + mutex_unlock(&rdev->cp.mutex); + mutex_unlock(&rdev->vram_mutex); + mutex_unlock(&rdev->ddev->struct_mutex); } -static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev, - struct radeon_power_state *power_state, - enum radeon_pm_clock_mode_type type) +static void radeon_pm_print_states(struct radeon_device *rdev) { - switch (type) { - case POWER_MODE_TYPE_DEFAULT: - default: - return 
power_state->default_clock_mode; - case POWER_MODE_TYPE_LOW: - return &power_state->clock_info[0]; - case POWER_MODE_TYPE_MID: - if (power_state->num_clock_modes > 2) - return &power_state->clock_info[1]; - else - return &power_state->clock_info[0]; - break; - case POWER_MODE_TYPE_HIGH: - return &power_state->clock_info[power_state->num_clock_modes - 1]; + int i, j; + struct radeon_power_state *power_state; + struct radeon_pm_clock_info *clock_info; + + DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states); + for (i = 0; i < rdev->pm.num_power_states; i++) { + power_state = &rdev->pm.power_state[i]; + DRM_DEBUG("State %d: %s\n", i, + radeon_pm_state_type_name[power_state->type]); + if (i == rdev->pm.default_power_state_index) + DRM_DEBUG("\tDefault"); + if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) + DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes); + if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) + DRM_DEBUG("\tSingle display only\n"); + DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes); + for (j = 0; j < power_state->num_clock_modes; j++) { + clock_info = &(power_state->clock_info[j]); + if (rdev->flags & RADEON_IS_IGP) + DRM_DEBUG("\t\t%d e: %d%s\n", + j, + clock_info->sclk * 10, + clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); + else + DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n", + j, + clock_info->sclk * 10, + clock_info->mclk * 10, + clock_info->voltage.voltage, + clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); + } } +} +static ssize_t radeon_get_pm_profile(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct radeon_device *rdev = ddev->dev_private; + int cp = rdev->pm.profile; + + return snprintf(buf, PAGE_SIZE, "%s\n", + (cp == PM_PROFILE_AUTO) ? "auto" : + (cp == PM_PROFILE_LOW) ? "low" : + (cp == PM_PROFILE_MID) ? "mid" : + (cp == PM_PROFILE_HIGH) ? 
"high" : "default"); } -static void radeon_get_power_state(struct radeon_device *rdev, - enum radeon_pm_action action) +static ssize_t radeon_set_pm_profile(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) { - switch (action) { - case PM_ACTION_MINIMUM: - rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY); - rdev->pm.requested_clock_mode = - radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW); - break; - case PM_ACTION_DOWNCLOCK: - rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE); - rdev->pm.requested_clock_mode = - radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID); - break; - case PM_ACTION_UPCLOCK: - rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT); - rdev->pm.requested_clock_mode = - radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH); - break; - case PM_ACTION_NONE: - default: - DRM_ERROR("Requested mode for not defined action\n"); - return; + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct radeon_device *rdev = ddev->dev_private; + + mutex_lock(&rdev->pm.mutex); + if (rdev->pm.pm_method == PM_METHOD_PROFILE) { + if (strncmp("default", buf, strlen("default")) == 0) + rdev->pm.profile = PM_PROFILE_DEFAULT; + else if (strncmp("auto", buf, strlen("auto")) == 0) + rdev->pm.profile = PM_PROFILE_AUTO; + else if (strncmp("low", buf, strlen("low")) == 0) + rdev->pm.profile = PM_PROFILE_LOW; + else if (strncmp("mid", buf, strlen("mid")) == 0) + rdev->pm.profile = PM_PROFILE_MID; + else if (strncmp("high", buf, strlen("high")) == 0) + rdev->pm.profile = PM_PROFILE_HIGH; + else { + DRM_ERROR("invalid power profile!\n"); + goto fail; + } + radeon_pm_update_profile(rdev); + radeon_pm_set_clocks(rdev); } - DRM_INFO("Requested: e: %d m: %d p: %d\n", - rdev->pm.requested_clock_mode->sclk, - rdev->pm.requested_clock_mode->mclk, - rdev->pm.requested_power_state->non_clock_info.pcie_lanes); +fail: + mutex_unlock(&rdev->pm.mutex); + + return count; } -static void radeon_set_power_state(struct radeon_device *rdev) +static ssize_t radeon_get_pm_method(struct device *dev, + struct device_attribute *attr, + char *buf) { - /* if *_clock_mode are the same, *_power_state are as well */ - if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode) - return; + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct radeon_device *rdev = ddev->dev_private; + int pm = rdev->pm.pm_method; - DRM_INFO("Setting: e: %d m: %d p: %d\n", - rdev->pm.requested_clock_mode->sclk, - rdev->pm.requested_clock_mode->mclk, - rdev->pm.requested_power_state->non_clock_info.pcie_lanes); - /* set pcie lanes */ - /* set voltage */ - /* set engine clock */ - radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); - /* set memory clock */ - - rdev->pm.current_power_state = rdev->pm.requested_power_state; - rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; + return snprintf(buf, PAGE_SIZE, "%s\n", + (pm == PM_METHOD_DYNPM) ? 
"dynpm" : "profile"); +} + +static ssize_t radeon_set_pm_method(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct radeon_device *rdev = ddev->dev_private; + + + if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { + mutex_lock(&rdev->pm.mutex); + rdev->pm.pm_method = PM_METHOD_DYNPM; + rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; + rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; + mutex_unlock(&rdev->pm.mutex); + } else if (strncmp("profile", buf, strlen("profile")) == 0) { + bool flush_wq = false; + + mutex_lock(&rdev->pm.mutex); + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + flush_wq = true; + } + /* disable dynpm */ + rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; + rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; + rdev->pm.pm_method = PM_METHOD_PROFILE; + mutex_unlock(&rdev->pm.mutex); + if (flush_wq) + flush_workqueue(rdev->wq); + } else { + DRM_ERROR("invalid power method!\n"); + goto fail; + } + radeon_pm_compute_clocks(rdev); +fail: + return count; +} + +static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); +static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); + +void radeon_pm_suspend(struct radeon_device *rdev) +{ + bool flush_wq = false; + + mutex_lock(&rdev->pm.mutex); + if (rdev->pm.pm_method == PM_METHOD_DYNPM) { + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) + rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; + flush_wq = true; + } + mutex_unlock(&rdev->pm.mutex); + if (flush_wq) + flush_workqueue(rdev->wq); +} + +void radeon_pm_resume(struct radeon_device *rdev) +{ + /* asic init will reset the default power state */ + mutex_lock(&rdev->pm.mutex); + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.current_clock_mode_index = 0; + rdev->pm.current_sclk = rdev->clock.default_sclk; + rdev->pm.current_mclk = rdev->clock.default_mclk; + rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + if (rdev->pm.pm_method == PM_METHOD_DYNPM + && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { + rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + } + mutex_unlock(&rdev->pm.mutex); + radeon_pm_compute_clocks(rdev); } int radeon_pm_init(struct radeon_device *rdev) { - rdev->pm.state = PM_STATE_DISABLED; - rdev->pm.planned_action = PM_ACTION_NONE; - rdev->pm.downclocked = false; + int ret; + /* default to profile method */ + rdev->pm.pm_method = PM_METHOD_PROFILE; + rdev->pm.profile = PM_PROFILE_DEFAULT; + rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; + rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; + rdev->pm.dynpm_can_upclock = true; + rdev->pm.dynpm_can_downclock = true; + rdev->pm.current_sclk = rdev->clock.default_sclk; + rdev->pm.current_mclk = rdev->clock.default_mclk; if (rdev->bios) { if (rdev->is_atom_bios) radeon_atombios_get_power_modes(rdev); else radeon_combios_get_power_modes(rdev); - radeon_print_power_mode_info(rdev); + radeon_pm_print_states(rdev); + radeon_pm_init_profile(rdev); } - if (radeon_debugfs_pm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for PM!\n"); - } + if (rdev->pm.num_power_states > 1) { + /* where's the best place to put these? 
*/ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + +#ifdef CONFIG_ACPI + rdev->acpi_nb.notifier_call = radeon_acpi_event; + register_acpi_notifier(&rdev->acpi_nb); +#endif + INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); - INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); + if (radeon_debugfs_pm_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for PM!\n"); + } - if (radeon_dynpm != -1 && radeon_dynpm) { - rdev->pm.state = PM_STATE_PAUSED; - DRM_INFO("radeon: dynamic power management enabled\n"); + DRM_INFO("radeon: power management initialized\n"); } - DRM_INFO("radeon: power management initialized\n"); - return 0; } +void radeon_pm_fini(struct radeon_device *rdev) +{ + if (rdev->pm.num_power_states > 1) { + bool flush_wq = false; + + mutex_lock(&rdev->pm.mutex); + if (rdev->pm.pm_method == PM_METHOD_PROFILE) { + rdev->pm.profile = PM_PROFILE_DEFAULT; + radeon_pm_update_profile(rdev); + radeon_pm_set_clocks(rdev); + } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { + /* cancel work */ + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + flush_wq = true; + /* reset default clocks */ + rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; + rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; + radeon_pm_set_clocks(rdev); + } + mutex_unlock(&rdev->pm.mutex); + if (flush_wq) + flush_workqueue(rdev->wq); + + device_remove_file(rdev->dev, &dev_attr_power_profile); + device_remove_file(rdev->dev, &dev_attr_power_method); +#ifdef CONFIG_ACPI + unregister_acpi_notifier(&rdev->acpi_nb); +#endif + } + + if (rdev->pm.i2c_bus) + radeon_i2c_destroy(rdev->pm.i2c_bus); +} + void radeon_pm_compute_clocks(struct radeon_device *rdev) { struct drm_device *ddev = rdev->ddev; - struct drm_connector *connector; + struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; - int count = 0; - if (rdev->pm.state == PM_STATE_DISABLED) + if (rdev->pm.num_power_states < 2) return; mutex_lock(&rdev->pm.mutex); rdev->pm.active_crtcs = 0; - list_for_each_entry(connector, - &ddev->mode_config.connector_list, head) { - if (connector->encoder && - connector->dpms != DRM_MODE_DPMS_OFF) { - radeon_crtc = to_radeon_crtc(connector->encoder->crtc); + rdev->pm.active_crtc_count = 0; + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); - ++count; + rdev->pm.active_crtc_count++; } } - if (count > 1) { - if (rdev->pm.state == PM_STATE_ACTIVE) { - cancel_delayed_work(&rdev->pm.idle_work); - - rdev->pm.state = PM_STATE_PAUSED; - rdev->pm.planned_action = PM_ACTION_UPCLOCK; - if (rdev->pm.downclocked) - radeon_pm_set_clocks(rdev); - - DRM_DEBUG("radeon: dynamic power management deactivated\n"); - } - } else if (count == 1) { - /* TODO: Increase clocks if needed for current mode */ - - if (rdev->pm.state == PM_STATE_MINIMUM) { - rdev->pm.state = PM_STATE_ACTIVE; - rdev->pm.planned_action = PM_ACTION_UPCLOCK; - radeon_pm_set_clocks(rdev); - - queue_delayed_work(rdev->wq, &rdev->pm.idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); - } - else if (rdev->pm.state == PM_STATE_PAUSED) { - rdev->pm.state = PM_STATE_ACTIVE; - queue_delayed_work(rdev->wq, &rdev->pm.idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); - 
DRM_DEBUG("radeon: dynamic power management activated\n"); - } - } - else { /* count == 0 */ - if (rdev->pm.state != PM_STATE_MINIMUM) { - cancel_delayed_work(&rdev->pm.idle_work); - - rdev->pm.state = PM_STATE_MINIMUM; - rdev->pm.planned_action = PM_ACTION_MINIMUM; - radeon_pm_set_clocks(rdev); + if (rdev->pm.pm_method == PM_METHOD_PROFILE) { + radeon_pm_update_profile(rdev); + radeon_pm_set_clocks(rdev); + } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { + if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { + if (rdev->pm.active_crtc_count > 1) { + if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + + rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; + rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; + radeon_pm_get_dynpm_state(rdev); + radeon_pm_set_clocks(rdev); + + DRM_DEBUG("radeon: dynamic power management deactivated\n"); + } + } else if (rdev->pm.active_crtc_count == 1) { + /* TODO: Increase clocks if needed for current mode */ + + if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { + rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; + rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; + radeon_pm_get_dynpm_state(rdev); + radeon_pm_set_clocks(rdev); + + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { + rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + DRM_DEBUG("radeon: dynamic power management activated\n"); + } + } else { /* count == 0 */ + if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { + cancel_delayed_work(&rdev->pm.dynpm_idle_work); + + rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; + rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; + radeon_pm_get_dynpm_state(rdev); + radeon_pm_set_clocks(rdev); + } + } } } mutex_unlock(&rdev->pm.mutex); } -static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) +static bool radeon_pm_in_vbl(struct radeon_device *rdev) { - u32 stat_crtc1 = 0, stat_crtc2 = 0; + u32 stat_crtc = 0, vbl = 0, position = 0; bool in_vbl = true; - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + if (rdev->pm.active_crtcs & (1 << 0)) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; + } + if (rdev->pm.active_crtcs & (1 << 1)) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; + } + if (rdev->pm.active_crtcs & (1 << 2)) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; + } + if (rdev->pm.active_crtcs & (1 << 3)) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; + } + if (rdev->pm.active_crtcs & (1 << 4)) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; + } + if (rdev->pm.active_crtcs & (1 << 5)) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC5_REGISTER_OFFSET) & 
0xfff; + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; + } + } else if (ASIC_IS_AVIVO(rdev)) { + if (rdev->pm.active_crtcs & (1 << 0)) { + vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; + position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; + } + if (rdev->pm.active_crtcs & (1 << 1)) { + vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; + position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; + } + if (position < vbl && position > 1) + in_vbl = false; + } else { if (rdev->pm.active_crtcs & (1 << 0)) { - stat_crtc1 = RREG32(D1CRTC_STATUS); - if (!(stat_crtc1 & 1)) + stat_crtc = RREG32(RADEON_CRTC_STATUS); + if (!(stat_crtc & 1)) in_vbl = false; } if (rdev->pm.active_crtcs & (1 << 1)) { - stat_crtc2 = RREG32(D2CRTC_STATUS); - if (!(stat_crtc2 & 1)) + stat_crtc = RREG32(RADEON_CRTC2_STATUS); + if (!(stat_crtc & 1)) in_vbl = false; } } - if (in_vbl == false) - DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1, - stat_crtc2, finish ? "exit" : "entry"); - return in_vbl; -} -static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) -{ - /*radeon_fence_wait_last(rdev);*/ - switch (rdev->pm.planned_action) { - case PM_ACTION_UPCLOCK: - rdev->pm.downclocked = false; - break; - case PM_ACTION_DOWNCLOCK: - rdev->pm.downclocked = true; - break; - case PM_ACTION_MINIMUM: - break; - case PM_ACTION_NONE: - DRM_ERROR("%s: PM_ACTION_NONE\n", __func__); - break; - } - /* check if we are in vblank */ - radeon_pm_debug_check_in_vbl(rdev, false); - radeon_set_power_state(rdev); - radeon_pm_debug_check_in_vbl(rdev, true); - rdev->pm.planned_action = PM_ACTION_NONE; + if (position < vbl && position > 1) + in_vbl = false; + + return in_vbl; } -static void radeon_pm_set_clocks(struct radeon_device *rdev) +static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) { - radeon_get_power_state(rdev, rdev->pm.planned_action); - mutex_lock(&rdev->cp.mutex); - - if (rdev->pm.active_crtcs & (1 << 0)) { - rdev->pm.req_vblank |= (1 << 0); - drm_vblank_get(rdev->ddev, 0); - } - if (rdev->pm.active_crtcs & (1 << 1)) { - rdev->pm.req_vblank |= (1 << 1); - drm_vblank_get(rdev->ddev, 1); - } - if (rdev->pm.active_crtcs) - wait_event_interruptible_timeout( - rdev->irq.vblank_queue, 0, - msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); - if (rdev->pm.req_vblank & (1 << 0)) { - rdev->pm.req_vblank &= ~(1 << 0); - drm_vblank_put(rdev->ddev, 0); - } - if (rdev->pm.req_vblank & (1 << 1)) { - rdev->pm.req_vblank &= ~(1 << 1); - drm_vblank_put(rdev->ddev, 1); - } + u32 stat_crtc = 0; + bool in_vbl = radeon_pm_in_vbl(rdev); - radeon_pm_set_clocks_locked(rdev); - mutex_unlock(&rdev->cp.mutex); + if (in_vbl == false) + DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc, + finish ? 
"exit" : "entry"); + return in_vbl; } -static void radeon_pm_idle_work_handler(struct work_struct *work) +static void radeon_dynpm_idle_work_handler(struct work_struct *work) { struct radeon_device *rdev; + int resched; rdev = container_of(work, struct radeon_device, - pm.idle_work.work); + pm.dynpm_idle_work.work); + resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); mutex_lock(&rdev->pm.mutex); - if (rdev->pm.state == PM_STATE_ACTIVE) { + if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { unsigned long irq_flags; int not_processed = 0; @@ -393,36 +719,41 @@ static void radeon_pm_idle_work_handler(struct work_struct *work) read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); if (not_processed >= 3) { /* should upclock */ - if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { - rdev->pm.planned_action = PM_ACTION_NONE; - } else if (rdev->pm.planned_action == PM_ACTION_NONE && - rdev->pm.downclocked) { - rdev->pm.planned_action = - PM_ACTION_UPCLOCK; - rdev->pm.action_timeout = jiffies + + if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { + rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; + } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && + rdev->pm.dynpm_can_upclock) { + rdev->pm.dynpm_planned_action = + DYNPM_ACTION_UPCLOCK; + rdev->pm.dynpm_action_timeout = jiffies + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); } } else if (not_processed == 0) { /* should downclock */ - if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { - rdev->pm.planned_action = PM_ACTION_NONE; - } else if (rdev->pm.planned_action == PM_ACTION_NONE && - !rdev->pm.downclocked) { - rdev->pm.planned_action = - PM_ACTION_DOWNCLOCK; - rdev->pm.action_timeout = jiffies + + if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { + rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; + } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && + rdev->pm.dynpm_can_downclock) { + rdev->pm.dynpm_planned_action = + DYNPM_ACTION_DOWNCLOCK; + rdev->pm.dynpm_action_timeout = jiffies + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); } } - if (rdev->pm.planned_action != PM_ACTION_NONE && - jiffies > rdev->pm.action_timeout) { + /* Note, radeon_pm_set_clocks is called with static_switch set + * to false since we want to wait for vbl to avoid flicker. 
+ */ + if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && + jiffies > rdev->pm.dynpm_action_timeout) { + radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); } - } - mutex_unlock(&rdev->pm.mutex); - queue_delayed_work(rdev->wq, &rdev->pm.idle_work, + queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + } + mutex_unlock(&rdev->pm.mutex); + ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); } /* @@ -436,12 +767,13 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]); seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); if (rdev->asic->get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); + if (rdev->pm.current_vddc) + seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); if (rdev->asic->get_pcie_lanes) seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 5c0dc082d330..c332f46340d5 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -346,6 +346,7 @@ # define RADEON_TVPLL_PWRMGT_OFF (1 << 30) # define RADEON_TVCLK_TURNOFF (1 << 31) #define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ +# define RADEON_PM_MODE_SEL (1 << 13) # define RADEON_TCL_BYPASS_DISABLE (1 << 20) #define RADEON_CLR_CMP_CLR_3D 0x1a24 #define RADEON_CLR_CMP_CLR_DST 0x15c8 @@ -552,7 +553,6 @@ # define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) #define RADEON_CRTC2_CRNT_FRAME 0x0314 #define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 -#define RADEON_CRTC2_STATUS 0x03fc #define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 #define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ #define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ @@ -994,6 +994,7 @@ # define RADEON_FP_DETECT_MASK (1 << 4) # define RADEON_CRTC2_VBLANK_MASK (1 << 9) # define RADEON_FP2_DETECT_MASK (1 << 10) +# define RADEON_GUI_IDLE_MASK (1 << 19) # define RADEON_SW_INT_ENABLE (1 << 25) #define RADEON_GEN_INT_STATUS 0x0044 # define AVIVO_DISPLAY_INT_STATUS (1 << 0) @@ -1005,6 +1006,8 @@ # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_FP2_DETECT_STAT (1 << 10) # define RADEON_FP2_DETECT_STAT_ACK (1 << 10) +# define RADEON_GUI_IDLE_STAT (1 << 19) +# define RADEON_GUI_IDLE_STAT_ACK (1 << 19) # define RADEON_SW_INT_FIRE (1 << 26) # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index e50513a62735..261e98a276db 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -26,6 +26,7 @@ * Jerome Glisse */ #include <linux/seq_file.h> +#include <linux/slab.h> #include "drmP.h" #include "radeon_drm.h" #include "radeon_reg.h" @@ -218,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev) void radeon_ib_pool_fini(struct radeon_device *rdev) { int r; + struct radeon_bo *robj; if (!rdev->ib_pool.ready) { return; } mutex_lock(&rdev->ib_pool.mutex); radeon_ib_bogus_cleanup(rdev); + robj = rdev->ib_pool.robj; + rdev->ib_pool.robj = NULL; + mutex_unlock(&rdev->ib_pool.mutex); - if (rdev->ib_pool.robj) { - r = 
radeon_bo_reserve(rdev->ib_pool.robj, false); + if (robj) { + r = radeon_bo_reserve(robj, false); if (likely(r == 0)) { - radeon_bo_kunmap(rdev->ib_pool.robj); - radeon_bo_unpin(rdev->ib_pool.robj); - radeon_bo_unreserve(rdev->ib_pool.robj); + radeon_bo_kunmap(robj); + radeon_bo_unpin(robj); + radeon_bo_unreserve(robj); } - radeon_bo_unref(&rdev->ib_pool.robj); - rdev->ib_pool.robj = NULL; + radeon_bo_unref(&robj); } - mutex_unlock(&rdev->ib_pool.mutex); } @@ -257,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev) } } -int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) +int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw) { int r; /* Align requested size with padding so unlock_commit can * pad safely */ ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; - mutex_lock(&rdev->cp.mutex); while (ndw > (rdev->cp.ring_free_dw - 1)) { radeon_ring_free_size(rdev); if (ndw < rdev->cp.ring_free_dw) { break; } r = radeon_fence_wait_next(rdev); - if (r) { - mutex_unlock(&rdev->cp.mutex); + if (r) return r; - } } rdev->cp.count_dw = ndw; rdev->cp.wptr_old = rdev->cp.wptr; return 0; } -void radeon_ring_unlock_commit(struct radeon_device *rdev) +int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) +{ + int r; + + mutex_lock(&rdev->cp.mutex); + r = radeon_ring_alloc(rdev, ndw); + if (r) { + mutex_unlock(&rdev->cp.mutex); + return r; + } + return 0; +} + +void radeon_ring_commit(struct radeon_device *rdev) { unsigned count_dw_pad; unsigned i; @@ -294,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev) } DRM_MEMORYBARRIER(); radeon_cp_commit(rdev); +} + +void radeon_ring_unlock_commit(struct radeon_device *rdev) +{ + radeon_ring_commit(rdev); mutex_unlock(&rdev->cp.mutex); } @@ -343,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) void radeon_ring_fini(struct radeon_device *rdev) { int r; + struct radeon_bo *ring_obj; mutex_lock(&rdev->cp.mutex); - if (rdev->cp.ring_obj) { - r = radeon_bo_reserve(rdev->cp.ring_obj, false); + ring_obj = rdev->cp.ring_obj; + rdev->cp.ring = NULL; + rdev->cp.ring_obj = NULL; + mutex_unlock(&rdev->cp.mutex); + + if (ring_obj) { + r = radeon_bo_reserve(ring_obj, false); if (likely(r == 0)) { - radeon_bo_kunmap(rdev->cp.ring_obj); - radeon_bo_unpin(rdev->cp.ring_obj); - radeon_bo_unreserve(rdev->cp.ring_obj); + radeon_bo_kunmap(ring_obj); + radeon_bo_unpin(ring_obj); + radeon_bo_unreserve(ring_obj); } - radeon_bo_unref(&rdev->cp.ring_obj); - rdev->cp.ring = NULL; - rdev->cp.ring_obj = NULL; + radeon_bo_unref(&ring_obj); } - mutex_unlock(&rdev->cp.mutex); } diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 40ab6d9c3736..b3ba44c0a818 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); - offset = *cmd << 10; + offset = *cmd3 << 10; if (radeon_check_and_fixup_offset (dev_priv, file_priv, &offset)) { DRM_ERROR("Invalid second packet offset\n"); @@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, flags |= RADEON_FRONT; } if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { - if (!dev_priv->have_z_offset) + if (!dev_priv->have_z_offset) { printk_once(KERN_ERR "radeon: illegal depth clear request. 
Buggy mesa detected - please update.\n"); - flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + } } if (flags & (RADEON_FRONT | RADEON_BACK)) { @@ -2895,9 +2896,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, return rv; rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer, cmdbuf->bufsz); - if (rv) + if (rv) { + drm_buffer_free(cmdbuf->buffer); return rv; - } + } + } else + goto done; orig_nbox = cmdbuf->nbox; @@ -2905,8 +2909,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, int temp; temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); - if (cmdbuf->bufsz != 0) - drm_buffer_free(cmdbuf->buffer); + drm_buffer_free(cmdbuf->buffer); return temp; } @@ -3012,16 +3015,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, } } - if (cmdbuf->bufsz != 0) - drm_buffer_free(cmdbuf->buffer); + drm_buffer_free(cmdbuf->buffer); + done: DRM_DEBUG("DONE\n"); COMMIT_RING(); return 0; err: - if (cmdbuf->bufsz != 0) - drm_buffer_free(cmdbuf->buffer); + drm_buffer_free(cmdbuf->buffer); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 43c5ab34b634..e9918d88f5b0 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -33,9 +33,11 @@ #include <ttm/ttm_bo_driver.h> #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> +#include <ttm/ttm_page_alloc.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> #include <linux/seq_file.h> +#include <linux/slab.h> #include "radeon_reg.h" #include "radeon.h" @@ -161,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, (unsigned)type); return -EINVAL; } - man->io_offset = rdev->mc.agp_base; - man->io_size = rdev->mc.gtt_size; - man->io_addr = NULL; if (!rdev->ddev->agp->cant_use_aperture) - man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | - TTM_MEMTYPE_FLAG_MAPPABLE; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - } else -#endif - { - man->io_offset = 0; - man->io_size = 0; - man->io_addr = NULL; } +#endif break; case TTM_PL_VRAM: /* "On-card" video ram */ man->gpu_offset = rdev->mc.vram_start; man->flags = TTM_MEMTYPE_FLAG_FIXED | - TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - man->io_addr = NULL; - man->io_offset = rdev->mc.aper_base; - man->io_size = rdev->mc.aper_size; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); @@ -243,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo, } static int radeon_move_blit(struct ttm_buffer_object *bo, - bool evict, int no_wait, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) + bool evict, int no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem, + struct ttm_mem_reg *old_mem) { struct radeon_device *rdev; uint64_t old_start, new_start; @@ -289,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); /* FIXME: handle copy error */ r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, - evict, no_wait, new_mem); + evict, no_wait_reserve, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; } static int radeon_move_vram_ram(struct ttm_buffer_object *bo, - bool evict, bool interruptible, bool no_wait, + bool evict, bool interruptible, + 
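
Two radeon_state.c fixes above deserve a closer look: the packet3 checker now shifts the dword it actually fetched (*cmd3 rather than *cmd), and radeon_cp_cmdbuf() now frees its drm_buffer on every exit, including the previously leaky drm_buffer_copy_from_user() failure, with a `done` label short-circuiting the zero-size case. A sketch of that single-owner cleanup shape (`dispatch_cmdbuf` and `process` are hypothetical names):

```c
#include <linux/slab.h>
#include <linux/uaccess.h>

static int process(void *buf, size_t len)
{
	return 0;                        /* placeholder for real dispatch */
}

static int dispatch_cmdbuf(const void __user *ubuf, size_t len)
{
	void *buf;
	int ret;

	if (len == 0)
		return 0;                /* no allocation, nothing to free */

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, len)) {
		ret = -EFAULT;           /* the old code leaked buf here */
		goto out;
	}

	ret = process(buf, len);
out:
	kfree(buf);                      /* one free covers every path */
	return ret;
}
```
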
bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; @@ -316,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, - interruptible, no_wait); + interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; } @@ -330,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem); + r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait, new_mem); + r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out_cleanup: if (tmp_mem.mm_node) { struct ttm_bo_global *glob = rdev->mman.bdev.glob; @@ -348,7 +338,8 @@ out_cleanup: } static int radeon_move_ram_vram(struct ttm_buffer_object *bo, - bool evict, bool interruptible, bool no_wait, + bool evict, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; @@ -368,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem); + r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } - r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem); + r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } @@ -393,8 +384,9 @@ out_cleanup: } static int radeon_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, bool no_wait, - struct ttm_mem_reg *new_mem) + bool evict, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; @@ -421,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { r = radeon_move_vram_ram(bo, evict, interruptible, - no_wait, new_mem); + no_wait_reserve, no_wait_gpu, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { r = radeon_move_ram_vram(bo, evict, interruptible, - no_wait, new_mem); + no_wait_reserve, no_wait_gpu, new_mem); } else { - r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); + r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); } if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); } - return r; } +static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct radeon_device *rdev = radeon_get_rdev(bdev); + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch 
(mem->mem_type) { + case TTM_PL_SYSTEM: + /* system memory */ + return 0; + case TTM_PL_TT: +#if __OS_HAS_AGP + if (rdev->flags & RADEON_IS_AGP) { + /* RADEON_IS_AGP is set only if AGP is active */ + mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.base = rdev->mc.agp_base; + mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; + } +#endif + break; + case TTM_PL_VRAM: + mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + /* check if it's visible */ + if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) + return -EINVAL; + mem->bus.base = rdev->mc.aper_base; + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + return 0; +} + +static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ +} + static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, bool lazy, bool interruptible) { @@ -478,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = { .sync_obj_ref = &radeon_sync_obj_ref, .move_notify = &radeon_bo_move_notify, .fault_reserve_notify = &radeon_bo_fault_reserve_notify, + .io_mem_reserve = &radeon_ttm_io_mem_reserve, + .io_mem_free = &radeon_ttm_io_mem_free, }; int radeon_ttm_init(struct radeon_device *rdev) @@ -570,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL; static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ttm_buffer_object *bo; + struct radeon_device *rdev; int r; - bo = (struct ttm_buffer_object *)vma->vm_private_data; + bo = (struct ttm_buffer_object *)vma->vm_private_data; if (bo == NULL) { return VM_FAULT_NOPAGE; } + rdev = radeon_get_rdev(bo->bdev); + mutex_lock(&rdev->vram_mutex); r = ttm_vm_ops->fault(vma, vmf); + mutex_unlock(&rdev->vram_mutex); return r; } @@ -744,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) static int radeon_ttm_debugfs_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; - static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; + static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1]; + static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32]; unsigned i; for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { @@ -762,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; } - return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES); + /* Add ttm page pool to debugfs */ + sprintf(radeon_mem_types_names[i], "ttm_page_pool"); + radeon_mem_types_list[i].name = radeon_mem_types_names[i]; + radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; + radeon_mem_types_list[i].driver_features = 0; + radeon_mem_types_list[i].data = NULL; + return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1); #endif return 0; diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen new file mode 100644 index 000000000000..f78fd592544d --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -0,0 +1,611 @@ +evergreen 0x9400 +0x00008040 WAIT_UNTIL +0x00008044 WAIT_UNTIL_POLL_CNTL +0x00008048 WAIT_UNTIL_POLL_MASK +0x0000804c WAIT_UNTIL_POLL_REFDATA +0x000088B0 VGT_VTX_VECT_EJECT_REG +0x000088C4 VGT_CACHE_INVALIDATION +0x000088D4 VGT_GS_VERTEX_REUSE +0x00008958 VGT_PRIMITIVE_TYPE +0x0000895C VGT_INDEX_TYPE +0x00008970 VGT_NUM_INDICES +0x00008974 VGT_NUM_INSTANCES 
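
The TTM hunk above replaces the per-memory-type io_offset/io_size/io_addr fields with an io_mem_reserve() callback that fills in a bus placement (base, offset, size, is_iomem) on demand, refusing VRAM placements that extend past visible_vram_size; alongside it, the move paths split the old no_wait flag into no_wait_reserve and no_wait_gpu so callers can decline to block on reservation and on the GPU independently. A sketch of how a consumer might turn such a bus placement into a CPU mapping (this is not the TTM code, just the shape of the contract):

```c
#include <linux/io.h>
#include <linux/types.h>

struct bus_placement {
	unsigned long base;      /* bus base of the aperture */
	unsigned long offset;    /* placement offset inside the aperture */
	unsigned long size;      /* bytes to map */
	bool is_iomem;           /* true: must ioremap; false: system pages */
	void *addr;              /* pre-existing CPU address, if any */
};

static void __iomem *map_placement(struct bus_placement *p)
{
	if (!p->is_iomem)
		return (void __iomem *)p->addr;  /* already CPU-visible */
	/* write-combined mapping, matching the WC default caching above */
	return ioremap_wc(p->base + p->offset, p->size);
}
```
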
+0x00008990 VGT_COMPUTE_DIM_X +0x00008994 VGT_COMPUTE_DIM_Y +0x00008998 VGT_COMPUTE_DIM_Z +0x0000899C VGT_COMPUTE_START_X +0x000089A0 VGT_COMPUTE_START_Y +0x000089A4 VGT_COMPUTE_START_Z +0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE +0x00008A14 PA_CL_ENHANCE +0x00008A60 PA_SC_LINE_STIPPLE_VALUE +0x00008B10 PA_SC_LINE_STIPPLE_STATE +0x00008BF0 PA_SC_ENHANCE +0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ +0x00008C00 SQ_CONFIG +0x00008C04 SQ_GPR_RESOURCE_MGMT_1 +0x00008C08 SQ_GPR_RESOURCE_MGMT_2 +0x00008C0C SQ_GPR_RESOURCE_MGMT_3 +0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1 +0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2 +0x00008C18 SQ_THREAD_RESOURCE_MGMT +0x00008C1C SQ_THREAD_RESOURCE_MGMT_2 +0x00008C20 SQ_STACK_RESOURCE_MGMT_1 +0x00008C24 SQ_STACK_RESOURCE_MGMT_2 +0x00008C28 SQ_STACK_RESOURCE_MGMT_3 +0x00008DF8 SQ_CONST_MEM_BASE +0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS +0x00009100 SPI_CONFIG_CNTL +0x0000913C SPI_CONFIG_CNTL_1 +0x00009700 VC_CNTL +0x00009714 VC_ENHANCE +0x00009830 DB_DEBUG +0x00009834 DB_DEBUG2 +0x00009838 DB_DEBUG3 +0x0000983C DB_DEBUG4 +0x00009854 DB_WATERMARKS +0x0000A400 TD_PS_BORDER_COLOR_INDEX +0x0000A404 TD_PS_BORDER_COLOR_RED +0x0000A408 TD_PS_BORDER_COLOR_GREEN +0x0000A40C TD_PS_BORDER_COLOR_BLUE +0x0000A410 TD_PS_BORDER_COLOR_ALPHA +0x0000A414 TD_VS_BORDER_COLOR_INDEX +0x0000A418 TD_VS_BORDER_COLOR_RED +0x0000A41C TD_VS_BORDER_COLOR_GREEN +0x0000A420 TD_VS_BORDER_COLOR_BLUE +0x0000A424 TD_VS_BORDER_COLOR_ALPHA +0x0000A428 TD_GS_BORDER_COLOR_INDEX +0x0000A42C TD_GS_BORDER_COLOR_RED +0x0000A430 TD_GS_BORDER_COLOR_GREEN +0x0000A434 TD_GS_BORDER_COLOR_BLUE +0x0000A438 TD_GS_BORDER_COLOR_ALPHA +0x0000A43C TD_HS_BORDER_COLOR_INDEX +0x0000A440 TD_HS_BORDER_COLOR_RED +0x0000A444 TD_HS_BORDER_COLOR_GREEN +0x0000A448 TD_HS_BORDER_COLOR_BLUE +0x0000A44C TD_HS_BORDER_COLOR_ALPHA +0x0000A450 TD_LS_BORDER_COLOR_INDEX +0x0000A454 TD_LS_BORDER_COLOR_RED +0x0000A458 TD_LS_BORDER_COLOR_GREEN +0x0000A45C TD_LS_BORDER_COLOR_BLUE +0x0000A460 TD_LS_BORDER_COLOR_ALPHA +0x0000A464 TD_CS_BORDER_COLOR_INDEX +0x0000A468 TD_CS_BORDER_COLOR_RED +0x0000A46C TD_CS_BORDER_COLOR_GREEN +0x0000A470 TD_CS_BORDER_COLOR_BLUE +0x0000A474 TD_CS_BORDER_COLOR_ALPHA +0x00028000 DB_RENDER_CONTROL +0x00028004 DB_COUNT_CONTROL +0x0002800C DB_RENDER_OVERRIDE +0x00028010 DB_RENDER_OVERRIDE2 +0x00028028 DB_STENCIL_CLEAR +0x0002802C DB_DEPTH_CLEAR +0x00028030 PA_SC_SCREEN_SCISSOR_TL +0x00028034 PA_SC_SCREEN_SCISSOR_BR +0x0002805C DB_DEPTH_SLICE +0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 +0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 +0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 +0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 +0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 +0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 +0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 +0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 +0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 +0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 +0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 +0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 +0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 +0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 +0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14 +0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 +0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 +0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 +0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 +0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 +0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 +0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 +0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 +0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 +0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 +0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 +0x000281A8 
SQ_ALU_CONST_BUFFER_SIZE_VS_10 +0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 +0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 +0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 +0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 +0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 +0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 +0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 +0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 +0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 +0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 +0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 +0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 +0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 +0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 +0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 +0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 +0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 +0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 +0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 +0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 +0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 +0x00028200 PA_SC_WINDOW_OFFSET +0x00028204 PA_SC_WINDOW_SCISSOR_TL +0x00028208 PA_SC_WINDOW_SCISSOR_BR +0x0002820C PA_SC_CLIPRECT_RULE +0x00028210 PA_SC_CLIPRECT_0_TL +0x00028214 PA_SC_CLIPRECT_0_BR +0x00028218 PA_SC_CLIPRECT_1_TL +0x0002821C PA_SC_CLIPRECT_1_BR +0x00028220 PA_SC_CLIPRECT_2_TL +0x00028224 PA_SC_CLIPRECT_2_BR +0x00028228 PA_SC_CLIPRECT_3_TL +0x0002822C PA_SC_CLIPRECT_3_BR +0x00028230 PA_SC_EDGERULE +0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET +0x00028240 PA_SC_GENERIC_SCISSOR_TL +0x00028244 PA_SC_GENERIC_SCISSOR_BR +0x00028250 PA_SC_VPORT_SCISSOR_0_TL +0x00028254 PA_SC_VPORT_SCISSOR_0_BR +0x00028258 PA_SC_VPORT_SCISSOR_1_TL +0x0002825C PA_SC_VPORT_SCISSOR_1_BR +0x00028260 PA_SC_VPORT_SCISSOR_2_TL +0x00028264 PA_SC_VPORT_SCISSOR_2_BR +0x00028268 PA_SC_VPORT_SCISSOR_3_TL +0x0002826C PA_SC_VPORT_SCISSOR_3_BR +0x00028270 PA_SC_VPORT_SCISSOR_4_TL +0x00028274 PA_SC_VPORT_SCISSOR_4_BR +0x00028278 PA_SC_VPORT_SCISSOR_5_TL +0x0002827C PA_SC_VPORT_SCISSOR_5_BR +0x00028280 PA_SC_VPORT_SCISSOR_6_TL +0x00028284 PA_SC_VPORT_SCISSOR_6_BR +0x00028288 PA_SC_VPORT_SCISSOR_7_TL +0x0002828C PA_SC_VPORT_SCISSOR_7_BR +0x00028290 PA_SC_VPORT_SCISSOR_8_TL +0x00028294 PA_SC_VPORT_SCISSOR_8_BR +0x00028298 PA_SC_VPORT_SCISSOR_9_TL +0x0002829C PA_SC_VPORT_SCISSOR_9_BR +0x000282A0 PA_SC_VPORT_SCISSOR_10_TL +0x000282A4 PA_SC_VPORT_SCISSOR_10_BR +0x000282A8 PA_SC_VPORT_SCISSOR_11_TL +0x000282AC PA_SC_VPORT_SCISSOR_11_BR +0x000282B0 PA_SC_VPORT_SCISSOR_12_TL +0x000282B4 PA_SC_VPORT_SCISSOR_12_BR +0x000282B8 PA_SC_VPORT_SCISSOR_13_TL +0x000282BC PA_SC_VPORT_SCISSOR_13_BR +0x000282C0 PA_SC_VPORT_SCISSOR_14_TL +0x000282C4 PA_SC_VPORT_SCISSOR_14_BR +0x000282C8 PA_SC_VPORT_SCISSOR_15_TL +0x000282CC PA_SC_VPORT_SCISSOR_15_BR +0x000282D0 PA_SC_VPORT_ZMIN_0 +0x000282D4 PA_SC_VPORT_ZMAX_0 +0x000282D8 PA_SC_VPORT_ZMIN_1 +0x000282DC PA_SC_VPORT_ZMAX_1 +0x000282E0 PA_SC_VPORT_ZMIN_2 +0x000282E4 PA_SC_VPORT_ZMAX_2 +0x000282E8 PA_SC_VPORT_ZMIN_3 +0x000282EC PA_SC_VPORT_ZMAX_3 +0x000282F0 PA_SC_VPORT_ZMIN_4 +0x000282F4 PA_SC_VPORT_ZMAX_4 +0x000282F8 PA_SC_VPORT_ZMIN_5 +0x000282FC PA_SC_VPORT_ZMAX_5 +0x00028300 PA_SC_VPORT_ZMIN_6 +0x00028304 PA_SC_VPORT_ZMAX_6 +0x00028308 PA_SC_VPORT_ZMIN_7 +0x0002830C PA_SC_VPORT_ZMAX_7 +0x00028310 PA_SC_VPORT_ZMIN_8 +0x00028314 PA_SC_VPORT_ZMAX_8 +0x00028318 PA_SC_VPORT_ZMIN_9 +0x0002831C PA_SC_VPORT_ZMAX_9 +0x00028320 PA_SC_VPORT_ZMIN_10 +0x00028324 PA_SC_VPORT_ZMAX_10 +0x00028328 PA_SC_VPORT_ZMIN_11 +0x0002832C PA_SC_VPORT_ZMAX_11 +0x00028330 PA_SC_VPORT_ZMIN_12 +0x00028334 PA_SC_VPORT_ZMAX_12 +0x00028338 PA_SC_VPORT_ZMIN_13 +0x0002833C PA_SC_VPORT_ZMAX_13 +0x00028340 
PA_SC_VPORT_ZMIN_14 +0x00028344 PA_SC_VPORT_ZMAX_14 +0x00028348 PA_SC_VPORT_ZMIN_15 +0x0002834C PA_SC_VPORT_ZMAX_15 +0x00028350 SX_MISC +0x00028380 SQ_VTX_SEMANTIC_0 +0x00028384 SQ_VTX_SEMANTIC_1 +0x00028388 SQ_VTX_SEMANTIC_2 +0x0002838C SQ_VTX_SEMANTIC_3 +0x00028390 SQ_VTX_SEMANTIC_4 +0x00028394 SQ_VTX_SEMANTIC_5 +0x00028398 SQ_VTX_SEMANTIC_6 +0x0002839C SQ_VTX_SEMANTIC_7 +0x000283A0 SQ_VTX_SEMANTIC_8 +0x000283A4 SQ_VTX_SEMANTIC_9 +0x000283A8 SQ_VTX_SEMANTIC_10 +0x000283AC SQ_VTX_SEMANTIC_11 +0x000283B0 SQ_VTX_SEMANTIC_12 +0x000283B4 SQ_VTX_SEMANTIC_13 +0x000283B8 SQ_VTX_SEMANTIC_14 +0x000283BC SQ_VTX_SEMANTIC_15 +0x000283C0 SQ_VTX_SEMANTIC_16 +0x000283C4 SQ_VTX_SEMANTIC_17 +0x000283C8 SQ_VTX_SEMANTIC_18 +0x000283CC SQ_VTX_SEMANTIC_19 +0x000283D0 SQ_VTX_SEMANTIC_20 +0x000283D4 SQ_VTX_SEMANTIC_21 +0x000283D8 SQ_VTX_SEMANTIC_22 +0x000283DC SQ_VTX_SEMANTIC_23 +0x000283E0 SQ_VTX_SEMANTIC_24 +0x000283E4 SQ_VTX_SEMANTIC_25 +0x000283E8 SQ_VTX_SEMANTIC_26 +0x000283EC SQ_VTX_SEMANTIC_27 +0x000283F0 SQ_VTX_SEMANTIC_28 +0x000283F4 SQ_VTX_SEMANTIC_29 +0x000283F8 SQ_VTX_SEMANTIC_30 +0x000283FC SQ_VTX_SEMANTIC_31 +0x00028400 VGT_MAX_VTX_INDX +0x00028404 VGT_MIN_VTX_INDX +0x00028408 VGT_INDX_OFFSET +0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX +0x00028410 SX_ALPHA_TEST_CONTROL +0x00028414 CB_BLEND_RED +0x00028418 CB_BLEND_GREEN +0x0002841C CB_BLEND_BLUE +0x00028420 CB_BLEND_ALPHA +0x00028430 DB_STENCILREFMASK +0x00028434 DB_STENCILREFMASK_BF +0x00028438 SX_ALPHA_REF +0x0002843C PA_CL_VPORT_XSCALE_0 +0x00028440 PA_CL_VPORT_XOFFSET_0 +0x00028444 PA_CL_VPORT_YSCALE_0 +0x00028448 PA_CL_VPORT_YOFFSET_0 +0x0002844C PA_CL_VPORT_ZSCALE_0 +0x00028450 PA_CL_VPORT_ZOFFSET_0 +0x00028454 PA_CL_VPORT_XSCALE_1 +0x00028458 PA_CL_VPORT_XOFFSET_1 +0x0002845C PA_CL_VPORT_YSCALE_1 +0x00028460 PA_CL_VPORT_YOFFSET_1 +0x00028464 PA_CL_VPORT_ZSCALE_1 +0x00028468 PA_CL_VPORT_ZOFFSET_1 +0x0002846C PA_CL_VPORT_XSCALE_2 +0x00028470 PA_CL_VPORT_XOFFSET_2 +0x00028474 PA_CL_VPORT_YSCALE_2 +0x00028478 PA_CL_VPORT_YOFFSET_2 +0x0002847C PA_CL_VPORT_ZSCALE_2 +0x00028480 PA_CL_VPORT_ZOFFSET_2 +0x00028484 PA_CL_VPORT_XSCALE_3 +0x00028488 PA_CL_VPORT_XOFFSET_3 +0x0002848C PA_CL_VPORT_YSCALE_3 +0x00028490 PA_CL_VPORT_YOFFSET_3 +0x00028494 PA_CL_VPORT_ZSCALE_3 +0x00028498 PA_CL_VPORT_ZOFFSET_3 +0x0002849C PA_CL_VPORT_XSCALE_4 +0x000284A0 PA_CL_VPORT_XOFFSET_4 +0x000284A4 PA_CL_VPORT_YSCALE_4 +0x000284A8 PA_CL_VPORT_YOFFSET_4 +0x000284AC PA_CL_VPORT_ZSCALE_4 +0x000284B0 PA_CL_VPORT_ZOFFSET_4 +0x000284B4 PA_CL_VPORT_XSCALE_5 +0x000284B8 PA_CL_VPORT_XOFFSET_5 +0x000284BC PA_CL_VPORT_YSCALE_5 +0x000284C0 PA_CL_VPORT_YOFFSET_5 +0x000284C4 PA_CL_VPORT_ZSCALE_5 +0x000284C8 PA_CL_VPORT_ZOFFSET_5 +0x000284CC PA_CL_VPORT_XSCALE_6 +0x000284D0 PA_CL_VPORT_XOFFSET_6 +0x000284D4 PA_CL_VPORT_YSCALE_6 +0x000284D8 PA_CL_VPORT_YOFFSET_6 +0x000284DC PA_CL_VPORT_ZSCALE_6 +0x000284E0 PA_CL_VPORT_ZOFFSET_6 +0x000284E4 PA_CL_VPORT_XSCALE_7 +0x000284E8 PA_CL_VPORT_XOFFSET_7 +0x000284EC PA_CL_VPORT_YSCALE_7 +0x000284F0 PA_CL_VPORT_YOFFSET_7 +0x000284F4 PA_CL_VPORT_ZSCALE_7 +0x000284F8 PA_CL_VPORT_ZOFFSET_7 +0x000284FC PA_CL_VPORT_XSCALE_8 +0x00028500 PA_CL_VPORT_XOFFSET_8 +0x00028504 PA_CL_VPORT_YSCALE_8 +0x00028508 PA_CL_VPORT_YOFFSET_8 +0x0002850C PA_CL_VPORT_ZSCALE_8 +0x00028510 PA_CL_VPORT_ZOFFSET_8 +0x00028514 PA_CL_VPORT_XSCALE_9 +0x00028518 PA_CL_VPORT_XOFFSET_9 +0x0002851C PA_CL_VPORT_YSCALE_9 +0x00028520 PA_CL_VPORT_YOFFSET_9 +0x00028524 PA_CL_VPORT_ZSCALE_9 +0x00028528 PA_CL_VPORT_ZOFFSET_9 +0x0002852C PA_CL_VPORT_XSCALE_10 +0x00028530 PA_CL_VPORT_XOFFSET_10 
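
For orientation while reading this new file: the reg_srcs tables are plain text, a header line with the chip name and a bound consumed by the in-kernel generator (mkregtable), then one `<offset> <NAME>` pair per line naming a register that userspace command streams may write without special-case checking. A throwaway userspace parser, just to make the layout concrete (it ignores the header bound and sizes its bitmap from an assumed 0x40000-byte register space; mkregtable itself emits a C table instead):

```c
#include <stdio.h>

#define REG_SPACE 0x40000                      /* assumed upper bound */
static unsigned char safe[REG_SPACE / 4 / 8];  /* one bit per dword reg */

int main(int argc, char **argv)
{
	char chip[32], name[64];
	unsigned int bound, off, count = 0;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "r")))
		return 1;
	if (fscanf(f, "%31s %x", chip, &bound) != 2)
		return 1;
	while (fscanf(f, "%x %63s", &off, name) == 2) {
		if (off < REG_SPACE) {
			safe[(off / 4) / 8] |= 1u << ((off / 4) % 8);
			count++;
		}
	}
	printf("%s: %u whitelisted offsets (header bound 0x%x)\n",
	       chip, count, bound);
	fclose(f);
	return 0;
}
```
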
+0x00028534 PA_CL_VPORT_YSCALE_10 +0x00028538 PA_CL_VPORT_YOFFSET_10 +0x0002853C PA_CL_VPORT_ZSCALE_10 +0x00028540 PA_CL_VPORT_ZOFFSET_10 +0x00028544 PA_CL_VPORT_XSCALE_11 +0x00028548 PA_CL_VPORT_XOFFSET_11 +0x0002854C PA_CL_VPORT_YSCALE_11 +0x00028550 PA_CL_VPORT_YOFFSET_11 +0x00028554 PA_CL_VPORT_ZSCALE_11 +0x00028558 PA_CL_VPORT_ZOFFSET_11 +0x0002855C PA_CL_VPORT_XSCALE_12 +0x00028560 PA_CL_VPORT_XOFFSET_12 +0x00028564 PA_CL_VPORT_YSCALE_12 +0x00028568 PA_CL_VPORT_YOFFSET_12 +0x0002856C PA_CL_VPORT_ZSCALE_12 +0x00028570 PA_CL_VPORT_ZOFFSET_12 +0x00028574 PA_CL_VPORT_XSCALE_13 +0x00028578 PA_CL_VPORT_XOFFSET_13 +0x0002857C PA_CL_VPORT_YSCALE_13 +0x00028580 PA_CL_VPORT_YOFFSET_13 +0x00028584 PA_CL_VPORT_ZSCALE_13 +0x00028588 PA_CL_VPORT_ZOFFSET_13 +0x0002858C PA_CL_VPORT_XSCALE_14 +0x00028590 PA_CL_VPORT_XOFFSET_14 +0x00028594 PA_CL_VPORT_YSCALE_14 +0x00028598 PA_CL_VPORT_YOFFSET_14 +0x0002859C PA_CL_VPORT_ZSCALE_14 +0x000285A0 PA_CL_VPORT_ZOFFSET_14 +0x000285A4 PA_CL_VPORT_XSCALE_15 +0x000285A8 PA_CL_VPORT_XOFFSET_15 +0x000285AC PA_CL_VPORT_YSCALE_15 +0x000285B0 PA_CL_VPORT_YOFFSET_15 +0x000285B4 PA_CL_VPORT_ZSCALE_15 +0x000285B8 PA_CL_VPORT_ZOFFSET_15 +0x000285BC PA_CL_UCP_0_X +0x000285C0 PA_CL_UCP_0_Y +0x000285C4 PA_CL_UCP_0_Z +0x000285C8 PA_CL_UCP_0_W +0x000285CC PA_CL_UCP_1_X +0x000285D0 PA_CL_UCP_1_Y +0x000285D4 PA_CL_UCP_1_Z +0x000285D8 PA_CL_UCP_1_W +0x000285DC PA_CL_UCP_2_X +0x000285E0 PA_CL_UCP_2_Y +0x000285E4 PA_CL_UCP_2_Z +0x000285E8 PA_CL_UCP_2_W +0x000285EC PA_CL_UCP_3_X +0x000285F0 PA_CL_UCP_3_Y +0x000285F4 PA_CL_UCP_3_Z +0x000285F8 PA_CL_UCP_3_W +0x000285FC PA_CL_UCP_4_X +0x00028600 PA_CL_UCP_4_Y +0x00028604 PA_CL_UCP_4_Z +0x00028608 PA_CL_UCP_4_W +0x0002860C PA_CL_UCP_5_X +0x00028610 PA_CL_UCP_5_Y +0x00028614 PA_CL_UCP_5_Z +0x00028618 PA_CL_UCP_5_W +0x0002861C SPI_VS_OUT_ID_0 +0x00028620 SPI_VS_OUT_ID_1 +0x00028624 SPI_VS_OUT_ID_2 +0x00028628 SPI_VS_OUT_ID_3 +0x0002862C SPI_VS_OUT_ID_4 +0x00028630 SPI_VS_OUT_ID_5 +0x00028634 SPI_VS_OUT_ID_6 +0x00028638 SPI_VS_OUT_ID_7 +0x0002863C SPI_VS_OUT_ID_8 +0x00028640 SPI_VS_OUT_ID_9 +0x00028644 SPI_PS_INPUT_CNTL_0 +0x00028648 SPI_PS_INPUT_CNTL_1 +0x0002864C SPI_PS_INPUT_CNTL_2 +0x00028650 SPI_PS_INPUT_CNTL_3 +0x00028654 SPI_PS_INPUT_CNTL_4 +0x00028658 SPI_PS_INPUT_CNTL_5 +0x0002865C SPI_PS_INPUT_CNTL_6 +0x00028660 SPI_PS_INPUT_CNTL_7 +0x00028664 SPI_PS_INPUT_CNTL_8 +0x00028668 SPI_PS_INPUT_CNTL_9 +0x0002866C SPI_PS_INPUT_CNTL_10 +0x00028670 SPI_PS_INPUT_CNTL_11 +0x00028674 SPI_PS_INPUT_CNTL_12 +0x00028678 SPI_PS_INPUT_CNTL_13 +0x0002867C SPI_PS_INPUT_CNTL_14 +0x00028680 SPI_PS_INPUT_CNTL_15 +0x00028684 SPI_PS_INPUT_CNTL_16 +0x00028688 SPI_PS_INPUT_CNTL_17 +0x0002868C SPI_PS_INPUT_CNTL_18 +0x00028690 SPI_PS_INPUT_CNTL_19 +0x00028694 SPI_PS_INPUT_CNTL_20 +0x00028698 SPI_PS_INPUT_CNTL_21 +0x0002869C SPI_PS_INPUT_CNTL_22 +0x000286A0 SPI_PS_INPUT_CNTL_23 +0x000286A4 SPI_PS_INPUT_CNTL_24 +0x000286A8 SPI_PS_INPUT_CNTL_25 +0x000286AC SPI_PS_INPUT_CNTL_26 +0x000286B0 SPI_PS_INPUT_CNTL_27 +0x000286B4 SPI_PS_INPUT_CNTL_28 +0x000286B8 SPI_PS_INPUT_CNTL_29 +0x000286BC SPI_PS_INPUT_CNTL_30 +0x000286C0 SPI_PS_INPUT_CNTL_31 +0x000286C4 SPI_VS_OUT_CONFIG +0x000286C8 SPI_THREAD_GROUPING +0x000286CC SPI_PS_IN_CONTROL_0 +0x000286D0 SPI_PS_IN_CONTROL_1 +0x000286D4 SPI_INTERP_CONTROL_0 +0x000286D8 SPI_INPUT_Z +0x000286DC SPI_FOG_CNTL +0x000286E0 SPI_BARYC_CNTL +0x000286E4 SPI_PS_IN_CONTROL_2 +0x000286E8 SPI_COMPUTE_INPUT_CNTL +0x000286EC SPI_COMPUTE_NUM_THREAD_X +0x000286F0 SPI_COMPUTE_NUM_THREAD_Y +0x000286F4 SPI_COMPUTE_NUM_THREAD_Z +0x000286F8 
GDS_ADDR_SIZE +0x00028780 CB_BLEND0_CONTROL +0x00028784 CB_BLEND1_CONTROL +0x00028788 CB_BLEND2_CONTROL +0x0002878C CB_BLEND3_CONTROL +0x00028790 CB_BLEND4_CONTROL +0x00028794 CB_BLEND5_CONTROL +0x00028798 CB_BLEND6_CONTROL +0x0002879C CB_BLEND7_CONTROL +0x000287CC CS_COPY_STATE +0x000287D0 GFX_COPY_STATE +0x000287D4 PA_CL_POINT_X_RAD +0x000287D8 PA_CL_POINT_Y_RAD +0x000287DC PA_CL_POINT_SIZE +0x000287E0 PA_CL_POINT_CULL_RAD +0x00028808 CB_COLOR_CONTROL +0x0002880C DB_SHADER_CONTROL +0x00028810 PA_CL_CLIP_CNTL +0x00028814 PA_SU_SC_MODE_CNTL +0x00028818 PA_CL_VTE_CNTL +0x0002881C PA_CL_VS_OUT_CNTL +0x00028820 PA_CL_NANINF_CNTL +0x00028824 PA_SU_LINE_STIPPLE_CNTL +0x00028828 PA_SU_LINE_STIPPLE_SCALE +0x0002882C PA_SU_PRIM_FILTER_CNTL +0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1 +0x00028844 SQ_PGM_RESOURCES_PS +0x00028848 SQ_PGM_RESOURCES_2_PS +0x0002884C SQ_PGM_EXPORTS_PS +0x00028860 SQ_PGM_RESOURCES_VS +0x00028864 SQ_PGM_RESOURCES_2_VS +0x00028878 SQ_PGM_RESOURCES_GS +0x0002887C SQ_PGM_RESOURCES_2_GS +0x00028890 SQ_PGM_RESOURCES_ES +0x00028894 SQ_PGM_RESOURCES_2_ES +0x000288A8 SQ_PGM_RESOURCES_FS +0x000288BC SQ_PGM_RESOURCES_HS +0x000288C0 SQ_PGM_RESOURCES_2_HS +0x000288D4 SQ_PGM_RESOURCES_LS +0x000288D8 SQ_PGM_RESOURCES_2_LS +0x000288E8 SQ_LDS_ALLOC +0x000288EC SQ_LDS_ALLOC_PS +0x000288F0 SQ_VTX_SEMANTIC_CLEAR +0x00028A00 PA_SU_POINT_SIZE +0x00028A04 PA_SU_POINT_MINMAX +0x00028A08 PA_SU_LINE_CNTL +0x00028A0C PA_SC_LINE_STIPPLE +0x00028A10 VGT_OUTPUT_PATH_CNTL +0x00028A14 VGT_HOS_CNTL +0x00028A18 VGT_HOS_MAX_TESS_LEVEL +0x00028A1C VGT_HOS_MIN_TESS_LEVEL +0x00028A20 VGT_HOS_REUSE_DEPTH +0x00028A24 VGT_GROUP_PRIM_TYPE +0x00028A28 VGT_GROUP_FIRST_DECR +0x00028A2C VGT_GROUP_DECR +0x00028A30 VGT_GROUP_VECT_0_CNTL +0x00028A34 VGT_GROUP_VECT_1_CNTL +0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL +0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL +0x00028A40 VGT_GS_MODE +0x00028A48 PA_SC_MODE_CNTL_0 +0x00028A4C PA_SC_MODE_CNTL_1 +0x00028A50 VGT_ENHANCE +0x00028A54 VGT_GS_PER_ES +0x00028A58 VGT_ES_PER_GS +0x00028A5C VGT_GS_PER_VS +0x00028A6C VGT_GS_OUT_PRIM_TYPE +0x00028A84 VGT_PRIMITIVEID_EN +0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN +0x00028AA0 VGT_INSTANCE_STEP_RATE_0 +0x00028AA4 VGT_INSTANCE_STEP_RATE_1 +0x00028AB4 VGT_REUSE_OFF +0x00028AB8 VGT_VTX_CNT_EN +0x00028ABC DB_HTILE_SURFACE +0x00028AC0 DB_SRESULTS_COMPARE_STATE0 +0x00028AC4 DB_SRESULTS_COMPARE_STATE1 +0x00028AC8 DB_PRELOAD_CONTROL +0x00028B38 VGT_GS_MAX_VERT_OUT +0x00028B54 VGT_SHADER_STAGES_EN +0x00028B58 VGT_LS_HS_CONFIG +0x00028B5C VGT_LS_SIZE +0x00028B60 VGT_HS_SIZE +0x00028B64 VGT_LS_HS_ALLOC +0x00028B68 VGT_HS_PATCH_CONST +0x00028B6C VGT_TF_PARAM +0x00028B70 DB_ALPHA_TO_MASK +0x00028B74 VGT_DISPATCH_INITIATOR +0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL +0x00028B7C PA_SU_POLY_OFFSET_CLAMP +0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE +0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET +0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE +0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET +0x00028B74 VGT_GS_INSTANCE_CNT +0x00028C00 PA_SC_LINE_CNTL +0x00028C08 PA_SU_VTX_CNTL +0x00028C0C PA_CL_GB_VERT_CLIP_ADJ +0x00028C10 PA_CL_GB_VERT_DISC_ADJ +0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ +0x00028C18 PA_CL_GB_HORZ_DISC_ADJ +0x00028C1C PA_SC_AA_SAMPLE_LOCS_0 +0x00028C20 PA_SC_AA_SAMPLE_LOCS_1 +0x00028C24 PA_SC_AA_SAMPLE_LOCS_2 +0x00028C28 PA_SC_AA_SAMPLE_LOCS_3 +0x00028C2C PA_SC_AA_SAMPLE_LOCS_4 +0x00028C30 PA_SC_AA_SAMPLE_LOCS_5 +0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 +0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 +0x00028C3C PA_SC_AA_MASK +0x00028C8C CB_COLOR0_CLEAR_WORD0 +0x00028C90 CB_COLOR0_CLEAR_WORD1 +0x00028C94 
CB_COLOR0_CLEAR_WORD2 +0x00028C98 CB_COLOR0_CLEAR_WORD3 +0x00028CC8 CB_COLOR1_CLEAR_WORD0 +0x00028CCC CB_COLOR1_CLEAR_WORD1 +0x00028CD0 CB_COLOR1_CLEAR_WORD2 +0x00028CD4 CB_COLOR1_CLEAR_WORD3 +0x00028D04 CB_COLOR2_CLEAR_WORD0 +0x00028D08 CB_COLOR2_CLEAR_WORD1 +0x00028D0C CB_COLOR2_CLEAR_WORD2 +0x00028D10 CB_COLOR2_CLEAR_WORD3 +0x00028D40 CB_COLOR3_CLEAR_WORD0 +0x00028D44 CB_COLOR3_CLEAR_WORD1 +0x00028D48 CB_COLOR3_CLEAR_WORD2 +0x00028D4C CB_COLOR3_CLEAR_WORD3 +0x00028D7C CB_COLOR4_CLEAR_WORD0 +0x00028D80 CB_COLOR4_CLEAR_WORD1 +0x00028D84 CB_COLOR4_CLEAR_WORD2 +0x00028D88 CB_COLOR4_CLEAR_WORD3 +0x00028DB8 CB_COLOR5_CLEAR_WORD0 +0x00028DBC CB_COLOR5_CLEAR_WORD1 +0x00028DC0 CB_COLOR5_CLEAR_WORD2 +0x00028DC4 CB_COLOR5_CLEAR_WORD3 +0x00028DF4 CB_COLOR6_CLEAR_WORD0 +0x00028DF8 CB_COLOR6_CLEAR_WORD1 +0x00028DFC CB_COLOR6_CLEAR_WORD2 +0x00028E00 CB_COLOR6_CLEAR_WORD3 +0x00028E30 CB_COLOR7_CLEAR_WORD0 +0x00028E34 CB_COLOR7_CLEAR_WORD1 +0x00028E38 CB_COLOR7_CLEAR_WORD2 +0x00028E3C CB_COLOR7_CLEAR_WORD3 +0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0 +0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1 +0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2 +0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3 +0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4 +0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5 +0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6 +0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7 +0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8 +0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9 +0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10 +0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11 +0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12 +0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13 +0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14 +0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15 +0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0 +0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1 +0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2 +0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3 +0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4 +0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5 +0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6 +0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7 +0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8 +0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9 +0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10 +0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11 +0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12 +0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13 +0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14 +0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15 +0x0003CFF0 SQ_VTX_BASE_VTX_LOC +0x0003CFF4 SQ_VTX_START_INST_LOC +0x0003FF00 SQ_TEX_SAMPLER_CLEAR +0x0003FF04 SQ_TEX_RESOURCE_CLEAR +0x0003FF08 SQ_LOOP_BOOL_CLEAR diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index 19c4663fa9c6..1e97b2d129fd 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 @@ -125,6 +125,8 @@ r300 0x4f60 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 989f7a020832..e958980d00f1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 @@ -125,6 +125,8 @@ r420 0x4f60 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 8f414a5f520f..af0da4ae3f55 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ 
b/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -26,20 +26,16 @@ r600 0x9400 0x00028408 VGT_INDX_OFFSET 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 -0x000088C0 VGT_LAST_COPY_STATE 0x00028400 VGT_MAX_VTX_INDX -0x000088D8 VGT_MC_LAT_CNTL 0x00028404 VGT_MIN_VTX_INDX 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX 0x00008970 VGT_NUM_INDICES 0x00008974 VGT_NUM_INSTANCES 0x00028A10 VGT_OUTPUT_PATH_CNTL -0x00028C5C VGT_OUT_DEALLOC_CNTL 0x00028A84 VGT_PRIMITIVEID_EN 0x00008958 VGT_PRIMITIVE_TYPE 0x00028AB4 VGT_REUSE_OFF -0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL 0x00028AB8 VGT_VTX_CNT_EN 0x000088B0 VGT_VTX_VECT_EJECT_REG 0x00028810 PA_CL_CLIP_CNTL @@ -280,7 +276,6 @@ r600 0x9400 0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE 0x00028814 PA_SU_SC_MODE_CNTL 0x00028C08 PA_SU_VTX_CNTL -0x00008C00 SQ_CONFIG 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 0x00008C10 SQ_STACK_RESOURCE_MGMT_1 @@ -320,18 +315,6 @@ r600 0x9400 0x000283FC SQ_VTX_SEMANTIC_31 0x000288E0 SQ_VTX_SEMANTIC_CLEAR 0x0003CFF4 SQ_VTX_START_INST_LOC -0x0003C000 SQ_TEX_SAMPLER_WORD0_0 -0x0003C004 SQ_TEX_SAMPLER_WORD1_0 -0x0003C008 SQ_TEX_SAMPLER_WORD2_0 -0x00030000 SQ_ALU_CONSTANT0_0 -0x00030004 SQ_ALU_CONSTANT1_0 -0x00030008 SQ_ALU_CONSTANT2_0 -0x0003000C SQ_ALU_CONSTANT3_0 -0x0003E380 SQ_BOOL_CONST_0 -0x0003E384 SQ_BOOL_CONST_1 -0x0003E388 SQ_BOOL_CONST_2 -0x0003E200 SQ_LOOP_CONST_0 -0x0003E200 SQ_LOOP_CONST_DX10_0 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 @@ -380,54 +363,6 @@ r600 0x9400 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 -0x000289C0 SQ_ALU_CONST_CACHE_GS_0 -0x000289C4 SQ_ALU_CONST_CACHE_GS_1 -0x000289C8 SQ_ALU_CONST_CACHE_GS_2 -0x000289CC SQ_ALU_CONST_CACHE_GS_3 -0x000289D0 SQ_ALU_CONST_CACHE_GS_4 -0x000289D4 SQ_ALU_CONST_CACHE_GS_5 -0x000289D8 SQ_ALU_CONST_CACHE_GS_6 -0x000289DC SQ_ALU_CONST_CACHE_GS_7 -0x000289E0 SQ_ALU_CONST_CACHE_GS_8 -0x000289E4 SQ_ALU_CONST_CACHE_GS_9 -0x000289E8 SQ_ALU_CONST_CACHE_GS_10 -0x000289EC SQ_ALU_CONST_CACHE_GS_11 -0x000289F0 SQ_ALU_CONST_CACHE_GS_12 -0x000289F4 SQ_ALU_CONST_CACHE_GS_13 -0x000289F8 SQ_ALU_CONST_CACHE_GS_14 -0x000289FC SQ_ALU_CONST_CACHE_GS_15 -0x00028940 SQ_ALU_CONST_CACHE_PS_0 -0x00028944 SQ_ALU_CONST_CACHE_PS_1 -0x00028948 SQ_ALU_CONST_CACHE_PS_2 -0x0002894C SQ_ALU_CONST_CACHE_PS_3 -0x00028950 SQ_ALU_CONST_CACHE_PS_4 -0x00028954 SQ_ALU_CONST_CACHE_PS_5 -0x00028958 SQ_ALU_CONST_CACHE_PS_6 -0x0002895C SQ_ALU_CONST_CACHE_PS_7 -0x00028960 SQ_ALU_CONST_CACHE_PS_8 -0x00028964 SQ_ALU_CONST_CACHE_PS_9 -0x00028968 SQ_ALU_CONST_CACHE_PS_10 -0x0002896C SQ_ALU_CONST_CACHE_PS_11 -0x00028970 SQ_ALU_CONST_CACHE_PS_12 -0x00028974 SQ_ALU_CONST_CACHE_PS_13 -0x00028978 SQ_ALU_CONST_CACHE_PS_14 -0x0002897C SQ_ALU_CONST_CACHE_PS_15 -0x00028980 SQ_ALU_CONST_CACHE_VS_0 -0x00028984 SQ_ALU_CONST_CACHE_VS_1 -0x00028988 SQ_ALU_CONST_CACHE_VS_2 -0x0002898C SQ_ALU_CONST_CACHE_VS_3 -0x00028990 SQ_ALU_CONST_CACHE_VS_4 -0x00028994 SQ_ALU_CONST_CACHE_VS_5 -0x00028998 SQ_ALU_CONST_CACHE_VS_6 -0x0002899C SQ_ALU_CONST_CACHE_VS_7 -0x000289A0 SQ_ALU_CONST_CACHE_VS_8 -0x000289A4 SQ_ALU_CONST_CACHE_VS_9 -0x000289A8 SQ_ALU_CONST_CACHE_VS_10 -0x000289AC SQ_ALU_CONST_CACHE_VS_11 -0x000289B0 SQ_ALU_CONST_CACHE_VS_12 -0x000289B4 SQ_ALU_CONST_CACHE_VS_13 -0x000289B8 SQ_ALU_CONST_CACHE_VS_14 -0x000289BC SQ_ALU_CONST_CACHE_VS_15 0x000288D8 SQ_PGM_CF_OFFSET_ES 0x000288DC SQ_PGM_CF_OFFSET_FS 
0x000288D4 SQ_PGM_CF_OFFSET_GS @@ -494,12 +429,7 @@ r600 0x9400 0x00028438 SX_ALPHA_REF 0x00028410 SX_ALPHA_TEST_CONTROL 0x00028350 SX_MISC -0x0000A020 SMX_DC_CTL0 -0x0000A024 SMX_DC_CTL1 -0x0000A028 SMX_DC_CTL2 -0x00009608 TC_CNTL 0x00009604 TC_INVALIDATE -0x00009490 TD_CNTL 0x00009400 TD_FILTER4 0x00009404 TD_FILTER4_1 0x00009408 TD_FILTER4_2 @@ -824,14 +754,9 @@ r600 0x9400 0x00028428 CB_FOG_GREEN 0x00028424 CB_FOG_RED 0x00008040 WAIT_UNTIL -0x00008950 CC_GC_SHADER_PIPE_CONFIG -0x00008954 GC_USER_SHADER_PIPE_CONFIG 0x00009714 VC_ENHANCE 0x00009830 DB_DEBUG 0x00009838 DB_WATERMARKS 0x00028D28 DB_SRESULTS_COMPARE_STATE0 0x00028D44 DB_ALPHA_TO_MASK -0x00009504 TA_CNTL 0x00009700 VC_CNTL -0x00009718 VC_CONFIG -0x0000A02C SMX_DC_MC_INTF_CTL diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 6801b865d1c4..83e8bc0c2bb2 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 @@ -125,6 +125,8 @@ rs600 0x6d40 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 38abf63bf2cd..1e46233985eb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -35,6 +35,7 @@ rv515 0x6d40 0x1DA8 VAP_VPORT_ZSCALE 0x1DAC VAP_VPORT_ZOFFSET 0x2080 VAP_CNTL +0x208C VAP_INDEX_OFFSET 0x2090 VAP_OUT_VTX_FMT_0 0x2094 VAP_OUT_VTX_FMT_1 0x20B0 VAP_VTE_CNTL @@ -158,6 +159,8 @@ rv515 0x6d40 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 626d51891ee9..f454c9a5e7f2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -26,8 +26,10 @@ * Jerome Glisse */ #include <linux/seq_file.h> +#include <linux/slab.h> #include <drm/drmP.h> #include "radeon.h" +#include "radeon_asic.h" #include "rs400d.h" /* This files gather functions specifics to : rs400,rs480 */ @@ -55,7 +57,9 @@ void rs400_gart_adjust_size(struct radeon_device *rdev) } if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { /* FIXME: RS400 & RS480 seems to have issue with GART size - * if 4G of system memory (needs more testing) */ + * if 4G of system memory (needs more testing) + */ + /* XXX is this still an issue with proper alignment? */ rdev->mc.gtt_size = 32 * 1024 * 1024; DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); } @@ -202,9 +206,9 @@ void rs400_gart_disable(struct radeon_device *rdev) void rs400_gart_fini(struct radeon_device *rdev) { + radeon_gart_fini(rdev); rs400_gart_disable(rdev); radeon_gart_table_ram_free(rdev); - radeon_gart_fini(rdev); } int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) @@ -241,8 +245,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) void rs400_gpu_init(struct radeon_device *rdev) { - /* FIXME: HDP same place on rs400 ? */ - r100_hdp_reset(rdev); /* FIXME: is this correct ? 
*/ r420_pipes_init(rdev); if (rs400_mc_wait_for_idle(rdev)) { @@ -263,7 +265,9 @@ void rs400_mc_init(struct radeon_device *rdev) r100_vram_init_sizes(rdev); base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; radeon_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); } uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) @@ -388,6 +392,8 @@ static int rs400_startup(struct radeon_device *rdev) { int r; + r100_set_common_regs(rdev); + rs400_mc_program(rdev); /* Resume clock */ r300_clock_startup(rdev); @@ -428,7 +434,7 @@ int rs400_resume(struct radeon_device *rdev) /* setup MC before calling post tables */ rs400_mc_program(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -491,7 +497,7 @@ int rs400_init(struct radeon_device *rdev) return r; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -503,8 +509,6 @@ int rs400_init(struct radeon_device *rdev) /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize memory controller */ rs400_mc_init(rdev); /* Fence driver */ diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 47f046b78c6b..6dc15ea8ba33 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -37,6 +37,7 @@ */ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "atom.h" #include "rs600d.h" @@ -45,6 +46,136 @@ void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); +void rs600_pm_misc(struct radeon_device *rdev) +{ + int requested_index = rdev->pm.requested_power_state_index; + struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; + struct radeon_voltage *voltage = &ps->clock_info[0].voltage; + u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl; + u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl; + + if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { + if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + tmp = RREG32(voltage->gpio.reg); + if (voltage->active_high) + tmp |= voltage->gpio.mask; + else + tmp &= ~(voltage->gpio.mask); + WREG32(voltage->gpio.reg, tmp); + if (voltage->delay) + udelay(voltage->delay); + } else { + tmp = RREG32(voltage->gpio.reg); + if (voltage->active_high) + tmp &= ~voltage->gpio.mask; + else + tmp |= voltage->gpio.mask; + WREG32(voltage->gpio.reg, tmp); + if (voltage->delay) + udelay(voltage->delay); + } + } else if (voltage->type == VOLTAGE_VDDC) + radeon_atom_set_voltage(rdev, voltage->vddc_id); + + dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); + dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); + dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf); + if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { + if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) { + dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2); + dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2); + } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) 
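
rs600_pm_misc() above drives GPIO-controlled voltage with a read-modify-write whose polarity depends both on active_high and on whether the state asserts or releases the rail, followed by an optional settle delay. The two near-identical branches could share one helper; a sketch in the driver's idiom (the consolidated `assert` flag is mine, RREG32/WREG32/udelay as used throughout the hunk):

```c
static void gpio_set_level(struct radeon_device *rdev, u32 reg, u32 mask,
			   bool active_high, bool assert, unsigned int delay_us)
{
	u32 tmp = RREG32(reg);

	if (assert == active_high)
		tmp |= mask;             /* drive the pin to its active level */
	else
		tmp &= ~mask;
	WREG32(reg, tmp);
	if (delay_us)
		udelay(delay_us);        /* let the regulator settle */
}
```
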
{ + dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4); + dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4); + } + } else { + dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1); + dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1); + } + WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length); + + dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL); + if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { + dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP; + if (voltage->delay) { + dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC; + dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay); + } else + dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC; + } else + dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP; + WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl); + + hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL); + if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) + hdp_dyn_cntl &= ~HDP_FORCEON; + else + hdp_dyn_cntl |= HDP_FORCEON; + WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl); +#if 0 + /* mc_host_dyn seems to cause hangs from time to time */ + mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL); + if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN) + mc_host_dyn_cntl &= ~MC_HOST_FORCEON; + else + mc_host_dyn_cntl |= MC_HOST_FORCEON; + WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl); +#endif + dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL); + if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN) + dyn_backbias_cntl |= IO_CG_BACKBIAS_EN; + else + dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN; + WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl); + + /* set pcie lanes */ + if ((rdev->flags & RADEON_IS_PCIE) && + !(rdev->flags & RADEON_IS_IGP) && + rdev->asic->set_pcie_lanes && + (ps->pcie_lanes != + rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { + radeon_set_pcie_lanes(rdev, + ps->pcie_lanes); + DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); + } +} + +void rs600_pm_prepare(struct radeon_device *rdev) +{ + struct drm_device *ddev = rdev->ddev; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + u32 tmp; + + /* disable any active CRTCs */ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); + tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); + } + } +} + +void rs600_pm_finish(struct radeon_device *rdev) +{ + struct drm_device *ddev = rdev->ddev; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + u32 tmp; + + /* enable any active CRTCs */ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); + tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); + } + } +} + /* hpd for digital panel detect/disconnect */ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) { @@ -146,6 +277,78 @@ void rs600_hpd_fini(struct radeon_device *rdev) } } +void rs600_bm_disable(struct radeon_device *rdev) +{ + u32 tmp; + + /* disable bus mastering */ + pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); + pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); + mdelay(1); +} + +int rs600_asic_reset(struct radeon_device *rdev) +{ + u32 status, tmp; + + struct rv515_mc_save save; + + /* Stops all mc clients */ + rv515_mc_stop(rdev, &save); + status = RREG32(R_000E40_RBBM_STATUS); + if 
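
rs600_pm_prepare() and rs600_pm_finish() above are mirror images: set or clear AVIVO_CRTC_DISP_READ_REQUEST_DISABLE on every enabled CRTC so display memory requests pause while the clocks change, then resume. Folded into a single helper the loop looks like this (registers exactly as in the hunk; the bool parameter is my consolidation):

```c
static void rs600_set_display_read_disable(struct radeon_device *rdev,
					   bool disable)
{
	struct drm_crtc *crtc;

	list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		u32 tmp;

		if (!radeon_crtc->enabled)
			continue;
		tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
		if (disable)
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
		else
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
		WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
	}
}
```
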
(!G_000E40_GUI_ACTIVE(status)) { + return 0; + } + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* stop CP */ + WREG32(RADEON_CP_CSQ_CNTL, 0); + tmp = RREG32(RADEON_CP_RB_CNTL); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); + WREG32(RADEON_CP_RB_RPTR_WR, 0); + WREG32(RADEON_CP_RB_WPTR, 0); + WREG32(RADEON_CP_RB_CNTL, tmp); + pci_save_state(rdev->pdev); + /* disable bus mastering */ + rs600_bm_disable(rdev); + /* reset GA+VAP */ + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | + S_0000F0_SOFT_RESET_GA(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* reset CP */ + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* reset MC */ + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* restore PCI & busmastering */ + pci_restore_state(rdev->pdev); + /* Check if GPU is idle */ + if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { + dev_err(rdev->dev, "failed to reset GPU\n"); + rdev->gpu_lockup = true; + return -1; + } + rv515_mc_resume(rdev, &save); + dev_info(rdev->dev, "GPU reset succeed\n"); + return 0; +} + /* * GART. */ @@ -158,7 +361,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) WREG32_MC(R_000100_MC_PT0_CNTL, tmp); tmp = RREG32_MC(R_000100_MC_PT0_CNTL); - tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); + tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); WREG32_MC(R_000100_MC_PT0_CNTL, tmp); tmp = RREG32_MC(R_000100_MC_PT0_CNTL); @@ -267,9 +470,9 @@ void rs600_gart_disable(struct radeon_device *rdev) void rs600_gart_fini(struct radeon_device *rdev) { + radeon_gart_fini(rdev); rs600_gart_disable(rdev); radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); } #define R600_PTE_VALID (1 << 0) @@ -309,6 +512,9 @@ int rs600_irq_set(struct radeon_device *rdev) if (rdev->irq.sw_int) { tmp |= S_000040_SW_INT_EN(1); } + if (rdev->irq.gui_idle) { + tmp |= S_000040_GUI_IDLE(1); + } if (rdev->irq.crtc_vblank_int[0]) { mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); } @@ -331,9 +537,15 @@ int rs600_irq_set(struct radeon_device *rdev) static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) { uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); - uint32_t irq_mask = ~C_000044_SW_INT; + uint32_t irq_mask = S_000044_SW_INT(1); u32 tmp; + /* the interrupt works, but the status bit is permanently asserted */ + if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { + if (!rdev->irq.gui_idle_acked) + irq_mask |= S_000044_GUI_IDLE_STAT(1); + } + if (G_000044_DISPLAY_INT_STAT(irqs)) { *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { @@ -381,6 +593,9 @@ int rs600_irq_process(struct radeon_device *rdev) uint32_t r500_disp_int; bool queue_hotplug = false; + /* reset gui idle ack. 
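
Easy to miss in the GART hunk above is a one-character fix: the TLB flush used to combine two single-bit field macros with `&` instead of `|`. The macros select different bits, so their AND is always zero and the old `|=` set nothing at all. A tiny demonstration (the bit positions are invented; the masking shape matches the S_* helpers):

```c
#include <stdio.h>

#define S_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28)
#define S_INVALIDATE_L2_CACHE(x)    (((x) & 0x1) << 29)

int main(void)
{
	unsigned int tmp = 0;

	tmp |= S_INVALIDATE_ALL_L1_TLBS(1) & S_INVALIDATE_L2_CACHE(1);
	printf("with &: 0x%08x\n", tmp);  /* 0x00000000, nothing requested */

	tmp |= S_INVALIDATE_ALL_L1_TLBS(1) | S_INVALIDATE_L2_CACHE(1);
	printf("with |: 0x%08x\n", tmp);  /* 0x30000000, both invalidates */
	return 0;
}
```
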
the status bit is broken */ + rdev->irq.gui_idle_acked = false; + status = rs600_irq_ack(rdev, &r500_disp_int); if (!status && !r500_disp_int) { return IRQ_NONE; @@ -389,13 +604,21 @@ int rs600_irq_process(struct radeon_device *rdev) /* SW interrupt */ if (G_000044_SW_INT(status)) radeon_fence_process(rdev); + /* GUI idle */ + if (G_000040_GUI_IDLE(status)) { + rdev->irq.gui_idle_acked = true; + rdev->pm.gui_idle = true; + wake_up(&rdev->irq.idle_queue); + } /* Vertical blank interrupts */ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { @@ -408,6 +631,8 @@ int rs600_irq_process(struct radeon_device *rdev) } status = rs600_irq_ack(rdev, &r500_disp_int); } + /* reset gui idle ack. the status bit is broken */ + rdev->irq.gui_idle_acked = false; if (queue_hotplug) queue_work(rdev->wq, &rdev->hotplug_work); if (rdev->msi_enabled) { @@ -451,7 +676,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) void rs600_gpu_init(struct radeon_device *rdev) { - r100_hdp_reset(rdev); r420_pipes_init(rdev); /* Wait for mc idle */ if (rs600_mc_wait_for_idle(rdev)) @@ -472,13 +696,39 @@ void rs600_mc_init(struct radeon_device *rdev) rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); base = RREG32_MC(R_000004_MC_FB_LOCATION); base = G_000004_MC_FB_START(base) << 16; + rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); } void rs600_bandwidth_update(struct radeon_device *rdev) { - /* FIXME: implement, should this be like rs690 ? */ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; + /* FIXME: implement full support */ + + radeon_update_display_priority(rdev); + + if (rdev->mode_info.crtcs[0]->base.enabled) + mode0 = &rdev->mode_info.crtcs[0]->base.mode; + if (rdev->mode_info.crtcs[1]->base.enabled) + mode1 = &rdev->mode_info.crtcs[1]->base.mode; + + rs690_line_buffer_adjust(rdev, mode0, mode1); + + if (rdev->disp_priority == 2) { + d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT); + d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT); + d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); + d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); + } } uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) @@ -573,7 +823,7 @@ int rs600_resume(struct radeon_device *rdev) /* Resume clock before doing reset */ rv515_clock_startup(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -635,7 +885,7 @@ int rs600_init(struct radeon_device *rdev) return -EINVAL; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -647,8 +897,6 @@ int rs600_init(struct radeon_device *rdev) /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize memory controller */ rs600_mc_init(rdev); rs600_debugfs(rdev); diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h index c1c8f5885cbb..a27c13ac47c3 100644 --- a/drivers/gpu/drm/radeon/rs600d.h +++ b/drivers/gpu/drm/radeon/rs600d.h @@ -178,6 +178,52 @@ #define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) #define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) #define C_000074_MC_IND_DATA 0x00000000 +#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 +#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) +#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) +#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE +#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) +#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) +#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD +#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) +#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) +#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB +#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) +#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) +#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 +#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) +#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) +#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF +#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) +#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) +#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF +#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) +#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) +#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF +#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) +#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) +#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F +#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) +#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) +#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF +#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) +#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) +#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF +#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) +#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) +#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF +#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) +#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) +#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF +#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) +#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) +#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF +#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) +#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) +#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF +#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) +#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) +#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF #define R_000134_HDP_FB_LOCATION 0x000134 #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) @@ -535,4 +581,91 
@@ #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF +#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 +#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) +#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) +#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 +#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) +#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) +#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF +#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF +#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF +#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C +#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) +#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) +#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 +#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) +#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) +#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF +#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF +#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF +#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 +#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) +#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) +#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 +#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) +#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) +#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF +#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF +#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF +#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C +#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) +#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) +#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 +#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) +#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) +#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF +#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF +#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF + +/* PLL regs */ +#define GENERAL_PWRMGT 0x8 +#define GLOBAL_PWRMGT_EN (1 << 0) +#define MOBILE_SU (1 << 2) +#define DYN_PWRMGT_SCLK_LENGTH 0xc +#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0) +#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 
4) +#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8) +#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12) +#define POWER_D1_SCLK_HILEN(x) ((x) << 16) +#define POWER_D1_SCLK_LOLEN(x) ((x) << 20) +#define STATIC_SCREEN_HILEN(x) ((x) << 24) +#define STATIC_SCREEN_LOLEN(x) ((x) << 28) +#define DYN_SCLK_VOL_CNTL 0xe +#define IO_CG_VOLTAGE_DROP (1 << 0) +#define VOLTAGE_DROP_SYNC (1 << 2) +#define VOLTAGE_DELAY_SEL(x) ((x) << 3) +#define HDP_DYN_CNTL 0x10 +#define HDP_FORCEON (1 << 0) +#define MC_HOST_DYN_CNTL 0x1e +#define MC_HOST_FORCEON (1 << 0) +#define DYN_BACKBIAS_CNTL 0x29 +#define IO_CG_BACKBIAS_EN (1 << 0) + +/* mmreg */ +#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0 +#define PWRDN_WAIT_BUSY_OFF (1 << 0) +#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4) +#define PWRDN_WAIT_PPLL_OFF (1 << 8) +#define PWRUP_WAIT_PPLL_ON (1 << 12) +#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16) +#define PM_ASSERT_RESET (1 << 20) +#define PM_PWRDN_PPLL (1 << 24) + #endif diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 83b9174f76f2..ce4ecbe10816 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -27,6 +27,7 @@ */ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "atom.h" #include "rs690d.h" @@ -47,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev) static void rs690_gpu_init(struct radeon_device *rdev) { - /* FIXME: HDP same place on rs690 ? */ - r100_hdp_reset(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); if (rs690_mc_wait_for_idle(rdev)) { @@ -57,65 +56,82 @@ static void rs690_gpu_init(struct radeon_device *rdev) } } +union igp_info { + struct _ATOM_INTEGRATED_SYSTEM_INFO info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2; +}; + void rs690_pm_info(struct radeon_device *rdev) { int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); - struct _ATOM_INTEGRATED_SYSTEM_INFO *info; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; - void *ptr; + union igp_info *info; uint16_t data_offset; uint8_t frev, crev; fixed20_12 tmp; - atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, - &frev, &crev, &data_offset); - ptr = rdev->mode_info.atom_context->bios + data_offset; - info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; - info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; - /* Get various system informations from bios */ - switch (crev) { - case 1: - tmp.full = rfixed_const(100); - rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); - rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); - rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); - rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); - rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); - break; - case 2: - tmp.full = rfixed_const(100); - rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); - rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); - rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); - rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); - rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); - rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); - rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); - break; - default: - tmp.full = rfixed_const(100); + if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, + 
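The S_/G_/C_ triads added above follow the convention used throughout these register headers: S_ shifts a value into its field, G_ extracts it, and C_ is the AND-mask that clears it. A minimal read-modify-write sketch using the new D1MODE_PRIORITY_A_CNT fields (assumes the usual radeon RREG32/WREG32 accessors with rdev in scope; the mark value 0x80 is made up):

	u32 tmp;

	tmp = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
	tmp &= C_006548_D1MODE_PRIORITY_MARK_A;		/* clear the mark field, bits 14:0 */
	tmp |= S_006548_D1MODE_PRIORITY_MARK_A(0x80);	/* shift the new mark into place */
	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, tmp);
	/* G_006548_D1MODE_PRIORITY_MARK_A(tmp) reads the field back out */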
&frev, &crev, &data_offset)) { + info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset); + + /* Get various system information from the BIOS */ + switch (crev) { + case 1: + tmp.full = dfixed_const(100); + rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); + rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); + if (info->info.usK8MemoryClock) + rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); + else if (rdev->clock.default_mclk) { + rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); + rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); + } else + rdev->pm.igp_system_mclk.full = dfixed_const(400); + rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); + rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); + break; + case 2: + tmp.full = dfixed_const(100); + rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); + rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); + if (info->info_v2.ulBootUpUMAClock) + rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); + else if (rdev->clock.default_mclk) + rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); + else + rdev->pm.igp_system_mclk.full = dfixed_const(66700); + rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); + rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); + rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); + rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); + break; + default: + /* We assume the slowest possible clock, i.e. worst case */ + rdev->pm.igp_sideport_mclk.full = dfixed_const(200); + rdev->pm.igp_system_mclk.full = dfixed_const(200); + rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); + rdev->pm.igp_ht_link_width.full = dfixed_const(8); + DRM_ERROR("No integrated system info for your GPU, using safe default\n"); + break; + } + } else { /* We assume the slowest possible clock, i.e. worst case */ - /* DDR 333Mhz */ - rdev->pm.igp_sideport_mclk.full = rfixed_const(333); - /* FIXME: system clock ?
*/ - rdev->pm.igp_system_mclk.full = rfixed_const(100); - rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); - rdev->pm.igp_ht_link_clk.full = rfixed_const(200); - rdev->pm.igp_ht_link_width.full = rfixed_const(8); + rdev->pm.igp_sideport_mclk.full = dfixed_const(200); + rdev->pm.igp_system_mclk.full = dfixed_const(200); + rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); + rdev->pm.igp_ht_link_width.full = dfixed_const(8); DRM_ERROR("No integrated system info for your GPU, using safe default\n"); - break; } /* Compute various bandwidth */ /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ - tmp.full = rfixed_const(4); - rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); + tmp.full = dfixed_const(4); + rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp); /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 * = ht_clk * ht_width / 5 */ - tmp.full = rfixed_const(5); - rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, + tmp.full = dfixed_const(5); + rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk, rdev->pm.igp_ht_link_width); - rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); + rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp); if (tmp.full < rdev->pm.max_bandwidth.full) { /* HT link is a limiting factor */ rdev->pm.max_bandwidth.full = tmp.full; @@ -123,15 +139,14 @@ void rs690_pm_info(struct radeon_device *rdev) /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 * = (sideport_clk * 14) / 10 */ - tmp.full = rfixed_const(14); - rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); - tmp.full = rfixed_const(10); - rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); + tmp.full = dfixed_const(14); + rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp); + tmp.full = dfixed_const(10); + rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); } void rs690_mc_init(struct radeon_device *rdev) { - fixed20_12 a; u64 base; rs400_gart_adjust_size(rdev); @@ -145,18 +160,11 @@ void rs690_mc_init(struct radeon_device *rdev) base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); base = G_000100_MC_FB_START(base) << 16; rs690_pm_info(rdev); - /* FIXME: we should enforce default clock in case GPU is not in - * default setup - */ - a.full = rfixed_const(100); - rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); - rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); - a.full = rfixed_const(16); - /* core_bandwidth = sclk(Mhz) * 16 */ - rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; radeon_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); } void rs690_line_buffer_adjust(struct radeon_device *rdev, @@ -224,10 +232,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, fixed20_12 a, b, c; fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; - /* FIXME: detect IGP with sideport memory, i don't think there is any - * such product available - */ - bool sideport = false; if (!crtc->base.enabled) { /* FIXME: wouldn't it better to set priority mark to maximum */ @@ -235,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, return; } - if 
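The comments in rs690_pm_info above already reduce each bandwidth to a simple product; here is that arithmetic worked through with made-up clocks, using the same 20.12 fixed-point helpers (dfixed_const/dfixed_mul/dfixed_div from drm_fixed.h) that the hunk relies on:

	fixed20_12 clk, width, tmp, bw;

	clk.full = dfixed_const(400);		/* assume system_mclk = 400 MHz */
	tmp.full = dfixed_const(4);
	bw.full = dfixed_mul(clk, tmp);		/* k8_bandwidth = 400 * 4 = 1600 */

	clk.full = dfixed_const(1000);		/* assume ht_link_clk = 1000 MHz */
	width.full = dfixed_const(16);		/* assume ht_link_width = 16 */
	tmp.full = dfixed_const(5);
	bw.full = dfixed_mul(clk, width);
	bw.full = dfixed_div(bw, tmp);		/* ht_bandwidth = 1000 * 16 / 5 = 3200 */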
(crtc->vsc.full > rfixed_const(2)) - wm->num_line_pair.full = rfixed_const(2); + if (crtc->vsc.full > dfixed_const(2)) + wm->num_line_pair.full = dfixed_const(2); else - wm->num_line_pair.full = rfixed_const(1); - - b.full = rfixed_const(mode->crtc_hdisplay); - c.full = rfixed_const(256); - a.full = rfixed_div(b, c); - request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); - request_fifo_depth.full = rfixed_ceil(request_fifo_depth); - if (a.full < rfixed_const(4)) { + wm->num_line_pair.full = dfixed_const(1); + + b.full = dfixed_const(mode->crtc_hdisplay); + c.full = dfixed_const(256); + a.full = dfixed_div(b, c); + request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); + request_fifo_depth.full = dfixed_ceil(request_fifo_depth); + if (a.full < dfixed_const(4)) { wm->lb_request_fifo_depth = 4; } else { - wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); + wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); } /* Determine consumption rate @@ -257,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, * vsc = vertical scaling ratio, defined as source/destination * hsc = horizontal scaling ration, defined as source/destination */ - a.full = rfixed_const(mode->clock); - b.full = rfixed_const(1000); - a.full = rfixed_div(a, b); - pclk.full = rfixed_div(b, a); + a.full = dfixed_const(mode->clock); + b.full = dfixed_const(1000); + a.full = dfixed_div(a, b); + pclk.full = dfixed_div(b, a); if (crtc->rmx_type != RMX_OFF) { - b.full = rfixed_const(2); + b.full = dfixed_const(2); if (crtc->vsc.full > b.full) b.full = crtc->vsc.full; - b.full = rfixed_mul(b, crtc->hsc); - c.full = rfixed_const(2); - b.full = rfixed_div(b, c); - consumption_time.full = rfixed_div(pclk, b); + b.full = dfixed_mul(b, crtc->hsc); + c.full = dfixed_const(2); + b.full = dfixed_div(b, c); + consumption_time.full = dfixed_div(pclk, b); } else { consumption_time.full = pclk.full; } - a.full = rfixed_const(1); - wm->consumption_rate.full = rfixed_div(a, consumption_time); + a.full = dfixed_const(1); + wm->consumption_rate.full = dfixed_div(a, consumption_time); /* Determine line time @@ -281,27 +285,27 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, * LineTime = total number of horizontal pixels * pclk = pixel clock period(ns) */ - a.full = rfixed_const(crtc->base.mode.crtc_htotal); - line_time.full = rfixed_mul(a, pclk); + a.full = dfixed_const(crtc->base.mode.crtc_htotal); + line_time.full = dfixed_mul(a, pclk); /* Determine active time * ActiveTime = time of active region of display within one line, * hactive = total number of horizontal active pixels * htotal = total number of horizontal pixels */ - a.full = rfixed_const(crtc->base.mode.crtc_htotal); - b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); - wm->active_time.full = rfixed_mul(line_time, b); - wm->active_time.full = rfixed_div(wm->active_time, a); + a.full = dfixed_const(crtc->base.mode.crtc_htotal); + b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); + wm->active_time.full = dfixed_mul(line_time, b); + wm->active_time.full = dfixed_div(wm->active_time, a); /* Maximun bandwidth is the minimun bandwidth of all component */ rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; - if (sideport) { + if (rdev->mc.igp_sideport_enabled) { if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && rdev->pm.sideport_bandwidth.full) rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; - read_delay_latency.full = rfixed_const(370 * 800 * 1000); - read_delay_latency.full = 
rfixed_div(read_delay_latency, + read_delay_latency.full = dfixed_const(370 * 800 * 1000); + read_delay_latency.full = dfixed_div(read_delay_latency, rdev->pm.igp_sideport_mclk); } else { if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && @@ -310,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && rdev->pm.ht_bandwidth.full) rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; - read_delay_latency.full = rfixed_const(5000); + read_delay_latency.full = dfixed_const(5000); } /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ - a.full = rfixed_const(16); - rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); - a.full = rfixed_const(1000); - rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); + a.full = dfixed_const(16); + rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); + a.full = dfixed_const(1000); + rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); /* Determine chunk time * ChunkTime = the time it takes the DCP to send one chunk of data * to the LB which consists of pipeline delay and inter chunk gap * sclk = system clock(ns) */ - a.full = rfixed_const(256 * 13); - chunk_time.full = rfixed_mul(rdev->pm.sclk, a); - a.full = rfixed_const(10); - chunk_time.full = rfixed_div(chunk_time, a); + a.full = dfixed_const(256 * 13); + chunk_time.full = dfixed_mul(rdev->pm.sclk, a); + a.full = dfixed_const(10); + chunk_time.full = dfixed_div(chunk_time, a); /* Determine the worst case latency * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) @@ -336,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, * ChunkTime = time it takes the DCP to send one chunk of data to the LB * which consists of pipeline delay and inter chunk gap */ - if (rfixed_trunc(wm->num_line_pair) > 1) { - a.full = rfixed_const(3); - wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + if (dfixed_trunc(wm->num_line_pair) > 1) { + a.full = dfixed_const(3); + wm->worst_case_latency.full = dfixed_mul(a, chunk_time); wm->worst_case_latency.full += read_delay_latency.full; } else { - a.full = rfixed_const(2); - wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + a.full = dfixed_const(2); + wm->worst_case_latency.full = dfixed_mul(a, chunk_time); wm->worst_case_latency.full += read_delay_latency.full; } @@ -356,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, * of data to the LB which consists of * pipeline delay and inter chunk gap */ - if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { + if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { tolerable_latency.full = line_time.full; } else { - tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; - tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); tolerable_latency.full = line_time.full - tolerable_latency.full; } /* We assume worst case 32bits (4 bytes) */ - wm->dbpp.full = rfixed_const(4 * 8); + wm->dbpp.full = dfixed_const(4 * 8); /* Determine the maximum priority mark * width = viewport width in pixels */ - a.full = rfixed_const(16); - wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); - wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); - 
wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); + a.full = dfixed_const(16); + wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); + wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); + wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); /* Determine estimated width */ estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; - estimated_width.full = rfixed_div(estimated_width, consumption_time); - if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { - wm->priority_mark.full = rfixed_const(10); + estimated_width.full = dfixed_div(estimated_width, consumption_time); + if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { + wm->priority_mark.full = dfixed_const(10); } else { - a.full = rfixed_const(16); - wm->priority_mark.full = rfixed_div(estimated_width, a); - wm->priority_mark.full = rfixed_ceil(wm->priority_mark); + a.full = dfixed_const(16); + wm->priority_mark.full = dfixed_div(estimated_width, a); + wm->priority_mark.full = dfixed_ceil(wm->priority_mark); wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; } } @@ -394,10 +398,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) struct drm_display_mode *mode1 = NULL; struct rs690_watermark wm0; struct rs690_watermark wm1; - u32 tmp; + u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; fixed20_12 priority_mark02, priority_mark12, fill_rate; fixed20_12 a, b; + radeon_update_display_priority(rdev); + if (rdev->mode_info.crtcs[0]->base.enabled) mode0 = &rdev->mode_info.crtcs[0]->base.mode; if (rdev->mode_info.crtcs[1]->base.enabled) @@ -407,7 +413,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev) * modes if the user specifies HIGH for displaypriority * option. 
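Worked through with made-up numbers, the priority-mark logic above comes down to two ceilinged divisions: priority_mark_max = ceil(hdisplay / 16), and the final mark backs that off by ceil(estimated_width / 16). A sketch with the same dfixed helpers:

	fixed20_12 a, est, mark, mark_max;

	a.full = dfixed_const(16);
	mark_max.full = dfixed_const(1280);	/* assume hdisplay = 1280 */
	mark_max.full = dfixed_div(mark_max, a);
	mark_max.full = dfixed_ceil(mark_max);	/* 1280 / 16 = 80 */

	est.full = dfixed_const(320);		/* assume estimated_width = 320 */
	mark.full = dfixed_div(est, a);
	mark.full = dfixed_ceil(mark);		/* 320 / 16 = 20 */
	mark.full = mark_max.full - mark.full;	/* final mark = 80 - 20 = 60 */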
*/ - if (rdev->disp_priority == 2) { + if ((rdev->disp_priority == 2) && + ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) { tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); tmp &= C_000104_MC_DISP0R_INIT_LAT; tmp &= C_000104_MC_DISP1R_INIT_LAT; @@ -432,124 +439,136 @@ void rs690_bandwidth_update(struct radeon_device *rdev) WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); if (mode0 && mode1) { - if (rfixed_trunc(wm0.dbpp) > 64) - a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); + if (dfixed_trunc(wm0.dbpp) > 64) + a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); else a.full = wm0.num_line_pair.full; - if (rfixed_trunc(wm1.dbpp) > 64) - b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); + if (dfixed_trunc(wm1.dbpp) > 64) + b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); else b.full = wm1.num_line_pair.full; a.full += b.full; - fill_rate.full = rfixed_div(wm0.sclk, a); + fill_rate.full = dfixed_div(wm0.sclk, a); if (wm0.consumption_rate.full > fill_rate.full) { b.full = wm0.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm0.active_time); - a.full = rfixed_mul(wm0.worst_case_latency, + b.full = dfixed_mul(b, wm0.active_time); + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); a.full = a.full + b.full; - b.full = rfixed_const(16 * 1000); - priority_mark02.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark02.full = dfixed_div(a, b); } else { - a.full = rfixed_mul(wm0.worst_case_latency, + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); - b.full = rfixed_const(16 * 1000); - priority_mark02.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark02.full = dfixed_div(a, b); } if (wm1.consumption_rate.full > fill_rate.full) { b.full = wm1.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm1.active_time); - a.full = rfixed_mul(wm1.worst_case_latency, + b.full = dfixed_mul(b, wm1.active_time); + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); a.full = a.full + b.full; - b.full = rfixed_const(16 * 1000); - priority_mark12.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); } else { - a.full = rfixed_mul(wm1.worst_case_latency, + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); - b.full = rfixed_const(16 * 1000); - priority_mark12.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); } if (wm0.priority_mark.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark.full; - if (rfixed_trunc(priority_mark02) < 0) + if (dfixed_trunc(priority_mark02) < 0) priority_mark02.full = 0; if (wm0.priority_mark_max.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; if (wm1.priority_mark.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark.full; - if (rfixed_trunc(priority_mark12) < 0) + if (dfixed_trunc(priority_mark12) < 0) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); + d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) { + 
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); + d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); + } + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } else if (mode0) { - if (rfixed_trunc(wm0.dbpp) > 64) - a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); + if (dfixed_trunc(wm0.dbpp) > 64) + a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); else a.full = wm0.num_line_pair.full; - fill_rate.full = rfixed_div(wm0.sclk, a); + fill_rate.full = dfixed_div(wm0.sclk, a); if (wm0.consumption_rate.full > fill_rate.full) { b.full = wm0.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm0.active_time); - a.full = rfixed_mul(wm0.worst_case_latency, + b.full = dfixed_mul(b, wm0.active_time); + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); a.full = a.full + b.full; - b.full = rfixed_const(16 * 1000); - priority_mark02.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark02.full = dfixed_div(a, b); } else { - a.full = rfixed_mul(wm0.worst_case_latency, + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); - b.full = rfixed_const(16 * 1000); - priority_mark02.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark02.full = dfixed_div(a, b); } if (wm0.priority_mark.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark.full; - if (rfixed_trunc(priority_mark02) < 0) + if (dfixed_trunc(priority_mark02) < 0) priority_mark02.full = 0; if (wm0.priority_mark_max.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); + if (rdev->disp_priority == 2) + d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, S_006D48_D2MODE_PRIORITY_A_OFF(1)); WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, S_006D4C_D2MODE_PRIORITY_B_OFF(1)); } else { - if (rfixed_trunc(wm1.dbpp) > 64) - a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); + if (dfixed_trunc(wm1.dbpp) > 64) + a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); else a.full = wm1.num_line_pair.full; - fill_rate.full = rfixed_div(wm1.sclk, a); + fill_rate.full = dfixed_div(wm1.sclk, a); if (wm1.consumption_rate.full > fill_rate.full) { b.full = wm1.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm1.active_time); - a.full = rfixed_mul(wm1.worst_case_latency, + b.full = dfixed_mul(b, wm1.active_time); + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); a.full = a.full + b.full; - b.full = rfixed_const(16 * 1000); - priority_mark12.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); } else { - a.full = rfixed_mul(wm1.worst_case_latency, + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); - b.full = rfixed_const(16 * 1000); - priority_mark12.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); } if (wm1.priority_mark.full > priority_mark12.full) priority_mark12.full = 
wm1.priority_mark.full; - if (rfixed_trunc(priority_mark12) < 0) + if (dfixed_trunc(priority_mark12) < 0) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; + d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) + d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); WREG32(R_006548_D1MODE_PRIORITY_A_CNT, S_006548_D1MODE_PRIORITY_A_OFF(1)); WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, S_00654C_D1MODE_PRIORITY_B_OFF(1)); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } } @@ -632,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev) /* Resume clock before doing reset */ rv515_clock_startup(rdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -695,7 +714,7 @@ int rs690_init(struct radeon_device *rdev) return -EINVAL; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -707,8 +726,6 @@ int rs690_init(struct radeon_device *rdev) /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize memory controller */ rs690_mc_init(rdev); rv515_debugfs(rdev); diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h index 62d31e7a897f..36e6398a98ae 100644 --- a/drivers/gpu/drm/radeon/rs690d.h +++ b/drivers/gpu/drm/radeon/rs690d.h @@ -182,6 +182,9 @@ #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF +#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index bea747da123f..0c9c169a6852 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -26,9 +26,11 @@ * Jerome Glisse */ #include <linux/seq_file.h> +#include <linux/slab.h> #include "drmP.h" #include "rv515d.h" #include "radeon.h" +#include "radeon_asic.h" #include "atom.h" #include "rv515_reg_safe.h" @@ -145,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev) { unsigned pipe_select_current, gb_pipe_select, tmp; - r100_hdp_reset(rdev); - r100_rb2d_reset(rdev); - if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " "reseting GPU. 
Bad things might happen.\n"); } - rv515_vga_render_disable(rdev); - r420_pipes_init(rdev); gb_pipe_select = RREG32(0x402C); tmp = RREG32(0x170C); @@ -172,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev) } } -int rv515_ga_reset(struct radeon_device *rdev) -{ - uint32_t tmp; - bool reinit_cp; - int i; - - reinit_cp = rdev->cp.ready; - rdev->cp.ready = false; - for (i = 0; i < rdev->usec_timeout; i++) { - WREG32(CP_CSQ_MODE, 0); - WREG32(CP_CSQ_CNTL, 0); - WREG32(RBBM_SOFT_RESET, 0x32005); - (void)RREG32(RBBM_SOFT_RESET); - udelay(200); - WREG32(RBBM_SOFT_RESET, 0); - /* Wait to prevent race in RBBM_STATUS */ - mdelay(1); - tmp = RREG32(RBBM_STATUS); - if (tmp & ((1 << 20) | (1 << 26))) { - DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); - /* GA still busy soft reset it */ - WREG32(0x429C, 0x200); - WREG32(VAP_PVS_STATE_FLUSH_REG, 0); - WREG32(0x43E0, 0); - WREG32(0x43E4, 0); - WREG32(0x24AC, 0); - } - /* Wait to prevent race in RBBM_STATUS */ - mdelay(1); - tmp = RREG32(RBBM_STATUS); - if (!(tmp & ((1 << 20) | (1 << 26)))) { - break; - } - } - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RBBM_STATUS); - if (!(tmp & ((1 << 20) | (1 << 26)))) { - DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", - tmp); - DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C)); - DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0)); - DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724)); - if (reinit_cp) { - return r100_cp_init(rdev, rdev->cp.ring_size); - } - return 0; - } - DRM_UDELAY(1); - } - tmp = RREG32(RBBM_STATUS); - DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); - return -1; -} - -int rv515_gpu_reset(struct radeon_device *rdev) -{ - uint32_t status; - - /* reset order likely matter */ - status = RREG32(RBBM_STATUS); - /* reset HDP */ - r100_hdp_reset(rdev); - /* reset rb2d */ - if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { - r100_rb2d_reset(rdev); - } - /* reset GA */ - if (status & ((1 << 20) | (1 << 26))) { - rv515_ga_reset(rdev); - } - /* reset CP */ - status = RREG32(RBBM_STATUS); - if (status & (1 << 16)) { - r100_cp_reset(rdev); - } - /* Check if GPU is idle */ - status = RREG32(RBBM_STATUS); - if (status & (1 << 31)) { - DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); - return -1; - } - DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); - return 0; -} - static void rv515_vram_get_type(struct radeon_device *rdev) { uint32_t tmp; @@ -279,19 +191,14 @@ static void rv515_vram_get_type(struct radeon_device *rdev) void rv515_mc_init(struct radeon_device *rdev) { - fixed20_12 a; rv515_vram_get_type(rdev); r100_vram_init_sizes(rdev); radeon_vram_location(rdev, &rdev->mc, 0); + rdev->mc.gtt_base_align = 0; if (!(rdev->flags & RADEON_IS_AGP)) radeon_gtt_location(rdev, &rdev->mc); - /* FIXME: we should enforce default clock in case GPU is not in - * default setup - */ - a.full = rfixed_const(100); - rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); - rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); + radeon_update_bandwidth_info(rdev); } uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) @@ -339,7 +246,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data) tmp = RREG32(0x2140); seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp); - radeon_gpu_reset(rdev); + radeon_asic_reset(rdev); tmp = RREG32(0x425C); seq_printf(m, "GA_IDLE 0x%08x\n", tmp); return 0; @@ -507,7 +414,7 @@ int rv515_resume(struct radeon_device *rdev) /* Resume clock before doing reset */ rv515_clock_startup(rdev); /* Reset gpu 
before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), RREG32(R_0007C0_CP_STAT)); @@ -576,7 +483,7 @@ int rv515_init(struct radeon_device *rdev) return -EINVAL; } /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { + if (radeon_asic_reset(rdev)) { dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", RREG32(R_000E40_RBBM_STATUS), @@ -587,8 +494,6 @@ int rv515_init(struct radeon_device *rdev) return -EINVAL; /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - /* Initialize power management */ - radeon_pm_init(rdev); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); @@ -888,20 +793,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, return; } - if (crtc->vsc.full > rfixed_const(2)) - wm->num_line_pair.full = rfixed_const(2); + if (crtc->vsc.full > dfixed_const(2)) + wm->num_line_pair.full = dfixed_const(2); else - wm->num_line_pair.full = rfixed_const(1); - - b.full = rfixed_const(mode->crtc_hdisplay); - c.full = rfixed_const(256); - a.full = rfixed_div(b, c); - request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); - request_fifo_depth.full = rfixed_ceil(request_fifo_depth); - if (a.full < rfixed_const(4)) { + wm->num_line_pair.full = dfixed_const(1); + + b.full = dfixed_const(mode->crtc_hdisplay); + c.full = dfixed_const(256); + a.full = dfixed_div(b, c); + request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); + request_fifo_depth.full = dfixed_ceil(request_fifo_depth); + if (a.full < dfixed_const(4)) { wm->lb_request_fifo_depth = 4; } else { - wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); + wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); } /* Determine consumption rate @@ -910,23 +815,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, * vsc = vertical scaling ratio, defined as source/destination * hsc = horizontal scaling ration, defined as source/destination */ - a.full = rfixed_const(mode->clock); - b.full = rfixed_const(1000); - a.full = rfixed_div(a, b); - pclk.full = rfixed_div(b, a); + a.full = dfixed_const(mode->clock); + b.full = dfixed_const(1000); + a.full = dfixed_div(a, b); + pclk.full = dfixed_div(b, a); if (crtc->rmx_type != RMX_OFF) { - b.full = rfixed_const(2); + b.full = dfixed_const(2); if (crtc->vsc.full > b.full) b.full = crtc->vsc.full; - b.full = rfixed_mul(b, crtc->hsc); - c.full = rfixed_const(2); - b.full = rfixed_div(b, c); - consumption_time.full = rfixed_div(pclk, b); + b.full = dfixed_mul(b, crtc->hsc); + c.full = dfixed_const(2); + b.full = dfixed_div(b, c); + consumption_time.full = dfixed_div(pclk, b); } else { consumption_time.full = pclk.full; } - a.full = rfixed_const(1); - wm->consumption_rate.full = rfixed_div(a, consumption_time); + a.full = dfixed_const(1); + wm->consumption_rate.full = dfixed_div(a, consumption_time); /* Determine line time @@ -934,27 +839,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, * LineTime = total number of horizontal pixels * pclk = pixel clock period(ns) */ - a.full = rfixed_const(crtc->base.mode.crtc_htotal); - line_time.full = rfixed_mul(a, pclk); + a.full = dfixed_const(crtc->base.mode.crtc_htotal); + line_time.full = dfixed_mul(a, pclk); /* Determine active time * ActiveTime = time of active region of display within one line, * hactive = total 
number of horizontal active pixels * htotal = total number of horizontal pixels */ - a.full = rfixed_const(crtc->base.mode.crtc_htotal); - b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); - wm->active_time.full = rfixed_mul(line_time, b); - wm->active_time.full = rfixed_div(wm->active_time, a); + a.full = dfixed_const(crtc->base.mode.crtc_htotal); + b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); + wm->active_time.full = dfixed_mul(line_time, b); + wm->active_time.full = dfixed_div(wm->active_time, a); /* Determine chunk time * ChunkTime = the time it takes the DCP to send one chunk of data * to the LB which consists of pipeline delay and inter chunk gap * sclk = system clock(Mhz) */ - a.full = rfixed_const(600 * 1000); - chunk_time.full = rfixed_div(a, rdev->pm.sclk); - read_delay_latency.full = rfixed_const(1000); + a.full = dfixed_const(600 * 1000); + chunk_time.full = dfixed_div(a, rdev->pm.sclk); + read_delay_latency.full = dfixed_const(1000); /* Determine the worst case latency * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) @@ -964,9 +869,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, * ChunkTime = time it takes the DCP to send one chunk of data to the LB * which consists of pipeline delay and inter chunk gap */ - if (rfixed_trunc(wm->num_line_pair) > 1) { - a.full = rfixed_const(3); - wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + if (dfixed_trunc(wm->num_line_pair) > 1) { + a.full = dfixed_const(3); + wm->worst_case_latency.full = dfixed_mul(a, chunk_time); wm->worst_case_latency.full += read_delay_latency.full; } else { wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; @@ -982,34 +887,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, * of data to the LB which consists of * pipeline delay and inter chunk gap */ - if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { + if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { tolerable_latency.full = line_time.full; } else { - tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; - tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); tolerable_latency.full = line_time.full - tolerable_latency.full; } /* We assume worst case 32bits (4 bytes) */ - wm->dbpp.full = rfixed_const(2 * 16); + wm->dbpp.full = dfixed_const(2 * 16); /* Determine the maximum priority mark * width = viewport width in pixels */ - a.full = rfixed_const(16); - wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); - wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); - wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); + a.full = dfixed_const(16); + wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); + wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); + wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); /* Determine estimated width */ estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; - estimated_width.full = rfixed_div(estimated_width, consumption_time); - if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { + estimated_width.full = dfixed_div(estimated_width, consumption_time); + if (dfixed_trunc(estimated_width) > 
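For rv515 the chunk time above is simply 600000 / sclk nanoseconds, and the worst-case latency is three chunk times plus the fixed read delay when more than one line pair is outstanding. Worked through with a made-up 300 MHz engine clock:

	fixed20_12 a, sclk, chunk, worst, read_delay;

	sclk.full = dfixed_const(300);		/* assume sclk = 300 MHz */
	a.full = dfixed_const(600 * 1000);
	chunk.full = dfixed_div(a, sclk);	/* chunk_time = 2000 ns */
	read_delay.full = dfixed_const(1000);
	a.full = dfixed_const(3);
	worst.full = dfixed_mul(a, chunk);	/* two line pairs: 3 chunk times */
	worst.full += read_delay.full;		/* 6000 + 1000 = 7000 ns */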
crtc->base.mode.crtc_hdisplay) { wm->priority_mark.full = wm->priority_mark_max.full; } else { - a.full = rfixed_const(16); - wm->priority_mark.full = rfixed_div(estimated_width, a); - wm->priority_mark.full = rfixed_ceil(wm->priority_mark); + a.full = dfixed_const(16); + wm->priority_mark.full = dfixed_div(estimated_width, a); + wm->priority_mark.full = dfixed_ceil(wm->priority_mark); wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; } } @@ -1020,7 +925,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) struct drm_display_mode *mode1 = NULL; struct rv515_watermark wm0; struct rv515_watermark wm1; - u32 tmp; + u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; fixed20_12 priority_mark02, priority_mark12, fill_rate; fixed20_12 a, b; @@ -1038,120 +943,132 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) WREG32(LB_MAX_REQ_OUTSTANDING, tmp); if (mode0 && mode1) { - if (rfixed_trunc(wm0.dbpp) > 64) - a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); + if (dfixed_trunc(wm0.dbpp) > 64) + a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); else a.full = wm0.num_line_pair.full; - if (rfixed_trunc(wm1.dbpp) > 64) - b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); + if (dfixed_trunc(wm1.dbpp) > 64) + b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); else b.full = wm1.num_line_pair.full; a.full += b.full; - fill_rate.full = rfixed_div(wm0.sclk, a); + fill_rate.full = dfixed_div(wm0.sclk, a); if (wm0.consumption_rate.full > fill_rate.full) { b.full = wm0.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm0.active_time); - a.full = rfixed_const(16); - b.full = rfixed_div(b, a); - a.full = rfixed_mul(wm0.worst_case_latency, + b.full = dfixed_mul(b, wm0.active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); priority_mark02.full = a.full + b.full; } else { - a.full = rfixed_mul(wm0.worst_case_latency, + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); - b.full = rfixed_const(16 * 1000); - priority_mark02.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark02.full = dfixed_div(a, b); } if (wm1.consumption_rate.full > fill_rate.full) { b.full = wm1.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm1.active_time); - a.full = rfixed_const(16); - b.full = rfixed_div(b, a); - a.full = rfixed_mul(wm1.worst_case_latency, + b.full = dfixed_mul(b, wm1.active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); priority_mark12.full = a.full + b.full; } else { - a.full = rfixed_mul(wm1.worst_case_latency, + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); - b.full = rfixed_const(16 * 1000); - priority_mark12.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); } if (wm0.priority_mark.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark.full; - if (rfixed_trunc(priority_mark02) < 0) + if (dfixed_trunc(priority_mark02) < 0) priority_mark02.full = 0; if (wm0.priority_mark_max.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; if (wm1.priority_mark.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark.full; - if (rfixed_trunc(priority_mark12) < 0) + if (dfixed_trunc(priority_mark12) < 0) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = 
wm1.priority_mark_max.full; - WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); - WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); + d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) { + d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + } + WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } else if (mode0) { - if (rfixed_trunc(wm0.dbpp) > 64) - a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); + if (dfixed_trunc(wm0.dbpp) > 64) + a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); else a.full = wm0.num_line_pair.full; - fill_rate.full = rfixed_div(wm0.sclk, a); + fill_rate.full = dfixed_div(wm0.sclk, a); if (wm0.consumption_rate.full > fill_rate.full) { b.full = wm0.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm0.active_time); - a.full = rfixed_const(16); - b.full = rfixed_div(b, a); - a.full = rfixed_mul(wm0.worst_case_latency, + b.full = dfixed_mul(b, wm0.active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); priority_mark02.full = a.full + b.full; } else { - a.full = rfixed_mul(wm0.worst_case_latency, + a.full = dfixed_mul(wm0.worst_case_latency, wm0.consumption_rate); - b.full = rfixed_const(16); - priority_mark02.full = rfixed_div(a, b); + b.full = dfixed_const(16); + priority_mark02.full = dfixed_div(a, b); } if (wm0.priority_mark.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark.full; - if (rfixed_trunc(priority_mark02) < 0) + if (dfixed_trunc(priority_mark02) < 0) priority_mark02.full = 0; if (wm0.priority_mark_max.full > priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; - WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); + if (rdev->disp_priority == 2) + d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); } else { - if (rfixed_trunc(wm1.dbpp) > 64) - a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); + if (dfixed_trunc(wm1.dbpp) > 64) + a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); else a.full = wm1.num_line_pair.full; - fill_rate.full = rfixed_div(wm1.sclk, a); + fill_rate.full = dfixed_div(wm1.sclk, a); if (wm1.consumption_rate.full > fill_rate.full) { b.full = wm1.consumption_rate.full - fill_rate.full; - b.full = rfixed_mul(b, wm1.active_time); - a.full = rfixed_const(16); - b.full = rfixed_div(b, a); - a.full = rfixed_mul(wm1.worst_case_latency, + b.full = dfixed_mul(b, wm1.active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); priority_mark12.full = a.full + b.full; } else { - a.full = rfixed_mul(wm1.worst_case_latency, + a.full = dfixed_mul(wm1.worst_case_latency, wm1.consumption_rate); - b.full = rfixed_const(16 * 1000); - 
priority_mark12.full = rfixed_div(a, b); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); } if (wm1.priority_mark.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark.full; - if (rfixed_trunc(priority_mark12) < 0) + if (dfixed_trunc(priority_mark12) < 0) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; + d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) + d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } } @@ -1161,6 +1078,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev) struct drm_display_mode *mode0 = NULL; struct drm_display_mode *mode1 = NULL; + radeon_update_display_priority(rdev); + if (rdev->mode_info.crtcs[0]->base.enabled) mode0 = &rdev->mode_info.crtcs[0]->base.mode; if (rdev->mode_info.crtcs[1]->base.enabled) @@ -1170,7 +1089,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev) * modes if the user specifies HIGH for displaypriority * option. */ - if (rdev->disp_priority == 2) { + if ((rdev->disp_priority == 2) && + (rdev->family == CHIP_RV515)) { tmp = RREG32_MC(MC_MISC_LAT_TIMER); tmp &= ~MC_DISP1R_INIT_LAT_MASK; tmp &= ~MC_DISP0R_INIT_LAT_MASK; diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h index fc216e49384d..590309a710b1 100644 --- a/drivers/gpu/drm/radeon/rv515d.h +++ b/drivers/gpu/drm/radeon/rv515d.h @@ -217,6 +217,52 @@ #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) /* Registers */ +#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 +#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) +#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) +#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE +#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) +#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) +#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD +#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) +#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) +#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB +#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) +#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) +#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 +#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) +#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) +#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF +#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) +#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) +#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF +#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) +#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) +#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF +#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) +#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) +#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F +#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) +#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) +#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF +#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) +#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) +#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF +#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) +#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) 
& 0x1) +#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF +#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) +#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) +#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF +#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) +#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) +#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF +#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) +#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) +#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF +#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) +#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) +#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 37887dee12af..b7fd82064922 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -27,8 +27,10 @@ */ #include <linux/firmware.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "radeon_drm.h" #include "rv770d.h" #include "atom.h" @@ -40,6 +42,21 @@ static void rv770_gpu_init(struct radeon_device *rdev); void rv770_fini(struct radeon_device *rdev); +void rv770_pm_misc(struct radeon_device *rdev) +{ + int req_ps_idx = rdev->pm.requested_power_state_index; + int req_cm_idx = rdev->pm.requested_clock_mode_index; + struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; + struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; + + if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { + if (voltage->voltage != rdev->pm.current_vddc) { + radeon_atom_set_voltage(rdev, voltage->voltage); + rdev->pm.current_vddc = voltage->voltage; + DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + } + } +} /* * GART @@ -125,9 +142,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) void rv770_pcie_gart_fini(struct radeon_device *rdev) { + radeon_gart_fini(rdev); rv770_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); } @@ -207,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev) WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); if (rdev->flags & RADEON_IS_AGP) { WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); @@ -235,7 +252,6 @@ void r700_cp_stop(struct radeon_device *rdev) WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); } - static int rv770_cp_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; @@ -270,6 +286,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) return 0; } +void r700_cp_fini(struct radeon_device *rdev) +{ + r700_cp_stop(rdev); + radeon_ring_fini(rdev); +} /* * Core functions @@ -647,10 +668,13 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); + WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CGTS_SYS_TCC_DISABLE, 0); WREG32(CGTS_TCC_DISABLE, 0); + WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); + WREG32(CGTS_USER_TCC_DISABLE, 0); 
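The R_0000F0_RBBM_SOFT_RESET field macros added to rv515d.h above back the switch from the per-chip rv515_gpu_reset() to the shared radeon_asic_reset() path. A rough sketch of how such a mask might be composed and pulsed (illustrative only; the real sequence lives in the common reset code, and WREG32/RREG32/mdelay are assumed in scope as elsewhere in the driver):

	/* assert soft reset of the CP and GA blocks, then release */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);	/* read back to post the write */
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);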
num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); @@ -864,7 +888,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) int rv770_mc_init(struct radeon_device *rdev) { - fixed20_12 a; u32 tmp; int chansize, numchan; @@ -902,25 +925,10 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; - /* FIXME remove this once we support unmappable VRAM */ - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { - rdev->mc.mc_vram_size = rdev->mc.aper_size; - rdev->mc.real_vram_size = rdev->mc.aper_size; - } r600_vram_gtt_location(rdev, &rdev->mc); - /* FIXME: we should enforce default clock in case GPU is not in - * default setup - */ - a.full = rfixed_const(100); - rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); - rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); - return 0; -} + radeon_update_bandwidth_info(rdev); -int rv770_gpu_reset(struct radeon_device *rdev) -{ - /* FIXME: implement any rv770 specific bits */ - return r600_gpu_reset(rdev); + return 0; } static int rv770_startup(struct radeon_device *rdev) @@ -1013,6 +1021,13 @@ int rv770_resume(struct radeon_device *rdev) DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; } + + r = r600_audio_init(rdev); + if (r) { + dev_err(rdev->dev, "radeon: audio init failed\n"); + return r; + } + return r; } @@ -1021,6 +1036,7 @@ int rv770_suspend(struct radeon_device *rdev) { int r; + r600_audio_fini(rdev); /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; @@ -1086,8 +1102,6 @@ int rv770_init(struct radeon_device *rdev) r = radeon_clocks_init(rdev); if (r) return r; - /* Initialize power management */ - radeon_pm_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) @@ -1124,7 +1138,7 @@ int rv770_init(struct radeon_device *rdev) r = rv770_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); - r600_cp_fini(rdev); + r700_cp_fini(rdev); r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); @@ -1144,13 +1158,20 @@ int rv770_init(struct radeon_device *rdev) } } } + + r = r600_audio_init(rdev); + if (r) { + dev_err(rdev->dev, "radeon: audio init failed\n"); + return r; + } + return 0; } void rv770_fini(struct radeon_device *rdev) { r600_blit_fini(rdev); - r600_cp_fini(rdev); + r700_cp_fini(rdev); r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index bff6fc2524c8..fa05cda8c98f 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) { drm_savage_private_t *dev_priv; - dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL); + dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; - memset(dev_priv, 0, sizeof(drm_savage_private_t)); dev->dev_private = (void *)dev_priv; dev_priv->chipset = (enum savage_family)chipset; @@ -553,7 +552,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) /* - * Initalize mappings. On Savage4 and SavageIX the alignment + * Initialize mappings. On Savage4 and SavageIX the alignment * and size of the aperture is not suitable for automatic MTRR setup * in drm_addmap. 
Therefore we add them manually before the maps are * initialized, and tear them down on last close. diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 1e138f5bae09..4256e2006476 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ - ttm_object.o ttm_lock.o ttm_execbuf_util.o + ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 4648ed2f0143..4bf69c404491 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -35,6 +35,7 @@ #include "ttm/ttm_placement.h" #include <linux/agp_backend.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/io.h> #include <asm/agp.h> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 89c38c49066f..555ebb12ace8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); - printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset); - printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size); printk(KERN_ERR TTM_PFX " size: %llu\n", man->size); printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", man->available_caching); @@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, - bool evict, bool interruptible, bool no_wait) + bool evict, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); @@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); + ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, - no_wait, mem); + no_wait_reserve, no_wait_gpu, mem); else - ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem); + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); if (ret) goto out_err; @@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo) } EXPORT_SYMBOL(ttm_bo_unref); +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) +{ + return cancel_delayed_work_sync(&bdev->wq); +} +EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); + +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) +{ + if (resched) + schedule_delayed_work(&bdev->wq, + ((HZ / 100) < 1) ? 
1 : HZ / 100); +} +EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); + static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, - bool no_wait) + bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; @@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, int ret = 0; spin_lock(&bo->lock); - ret = ttm_bo_wait(bo, false, interruptible, no_wait); + ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); spin_unlock(&bo->lock); if (unlikely(ret != 0)) { @@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, evict_mem = bo->mem; evict_mem.mm_node = NULL; + evict_mem.bus.io_reserved = false; placement.fpfn = 0; placement.lpfn = 0; @@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, - no_wait); + no_wait_reserve, no_wait_gpu); if (ret) { if (ret != -ERESTARTSYS) { printk(KERN_ERR TTM_PFX @@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, } ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, - no_wait); + no_wait_reserve, no_wait_gpu); if (ret) { if (ret != -ERESTARTSYS) printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); @@ -670,7 +684,8 @@ out: static int ttm_mem_evict_first(struct ttm_bo_device *bdev, uint32_t mem_type, - bool interruptible, bool no_wait) + bool interruptible, bool no_wait_reserve, + bool no_wait_gpu) { struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; @@ -687,11 +702,11 @@ retry: bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); kref_get(&bo->list_kref); - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); if (unlikely(ret == -EBUSY)) { spin_unlock(&glob->lru_lock); - if (likely(!no_wait)) + if (likely(!no_wait_gpu)) ret = ttm_bo_wait_unreserved(bo, interruptible); kref_put(&bo->list_kref, ttm_bo_release_list); @@ -713,7 +728,7 @@ retry: while (put_count--) kref_put(&bo->list_kref, ttm_bo_ref_bug); - ret = ttm_bo_evict(bo, interruptible, no_wait); + ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); ttm_bo_unreserve(bo); kref_put(&bo->list_kref, ttm_bo_release_list); @@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, uint32_t mem_type, struct ttm_placement *placement, struct ttm_mem_reg *mem, - bool interruptible, bool no_wait) + bool interruptible, + bool no_wait_reserve, + bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bdev->glob; @@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, } spin_unlock(&glob->lru_lock); ret = ttm_mem_evict_first(bdev, mem_type, interruptible, - no_wait); + no_wait_reserve, no_wait_gpu); if (unlikely(ret != 0)) return ret; } while (1); @@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, - bool interruptible, bool no_wait) + bool interruptible, bool no_wait_reserve, + bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; @@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, } ret = 
ttm_bo_mem_force_space(bo, mem_type, placement, mem, - interruptible, no_wait); + interruptible, no_wait_reserve, no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; mem->mm_node->private = bo; @@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu); int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, - bool interruptible, bool no_wait) + bool interruptible, bool no_wait_reserve, + bool no_wait_gpu) { struct ttm_bo_global *glob = bo->glob; int ret = 0; @@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * instead of doing it here. */ spin_lock(&bo->lock); - ret = ttm_bo_wait(bo, false, interruptible, no_wait); + ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); spin_unlock(&bo->lock); if (ret) return ret; mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; + mem.bus.io_reserved = false; /* * Determine where to move the buffer. */ - ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); + ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); if (ret) goto out_unlock; - ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); + ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); out_unlock: if (ret && mem.mm_node) { spin_lock(&glob->lru_lock); @@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, - bool interruptible, bool no_wait) + bool interruptible, bool no_wait_reserve, + bool no_wait_gpu) { int ret; @@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, */ ret = ttm_bo_mem_compat(placement, &bo->mem); if (ret < 0) { - ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait); + ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); if (ret) return ret; } else { @@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; + bo->mem.bus.io_reserved = false; bo->buffer_start = buffer_start & PAGE_MASK; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); @@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, goto out_err; } - ret = ttm_bo_validate(bo, placement, interruptible, false); + ret = ttm_bo_validate(bo, placement, interruptible, false, false); if (ret) goto out_err; @@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); while (!list_empty(&man->lru)) { spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, mem_type, false, false); + ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); if (ret) { if (allow_errors) { return ret; @@ -1425,8 +1447,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref) atomic_set(&glob->bo_count, 0); - kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type); - ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects"); + ret = kobject_init_and_add( + &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); if (unlikely(ret != 0)) kobject_put(&glob->kobj); return ret; @@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) return true; } -int ttm_bo_pci_offset(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, - unsigned long *bus_base, - unsigned long 
*bus_offset, unsigned long *bus_size) -{ - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - - *bus_size = 0; - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) - return -EINVAL; - - if (ttm_mem_reg_is_pci(bdev, mem)) { - *bus_offset = mem->mm_node->start << PAGE_SHIFT; - *bus_size = mem->num_pages << PAGE_SHIFT; - *bus_base = man->io_offset; - } - - return 0; -} - void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; @@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) if (!bdev->dev_mapping) return; - unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); + ttm_mem_io_free(bdev, &bo->mem); } EXPORT_SYMBOL(ttm_bo_unmap_virtual); @@ -1716,40 +1718,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_wait); -void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo) -{ - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); -} - -int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible, - bool no_wait) -{ - int ret; - - while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { - if (no_wait) - return -EBUSY; - else if (interruptible) { - ret = wait_event_interruptible - (bo->event_queue, atomic_read(&bo->reserved) == 0); - if (unlikely(ret != 0)) - return ret; - } else { - wait_event(bo->event_queue, - atomic_read(&bo->reserved) == 0); - } - } - return 0; -} - int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) { int ret = 0; /* - * Using ttm_bo_reserve instead of ttm_bo_block_reservation - * makes sure the lru lists are updated. + * Using ttm_bo_reserve makes sure the lru lists are updated. */ ret = ttm_bo_reserve(bo, true, no_wait, false, 0); @@ -1839,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) evict_mem.mem_type = TTM_PL_SYSTEM; ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, - false, false); + false, false, false); if (unlikely(ret != 0)) goto out; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 5ca37a58a98c..13012a1f1486 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -33,6 +33,7 @@ #include <linux/io.h> #include <linux/highmem.h> #include <linux/wait.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/module.h> @@ -49,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, bool no_wait, struct ttm_mem_reg *new_mem) + bool evict, bool no_wait_reserve, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; @@ -80,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_ttm); +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + int ret; + + if (!mem->bus.io_reserved) { + mem->bus.io_reserved = true; + ret = bdev->driver->io_mem_reserve(bdev, mem); + if (unlikely(ret != 0)) + return ret; + } + return 0; +} + +void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + if (bdev->driver->io_mem_reserve) { + if (mem->bus.io_reserved) { + mem->bus.io_reserved = false; + bdev->driver->io_mem_free(bdev, mem); + } + } +} + int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void **virtual) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - unsigned long bus_offset; - unsigned long bus_size; - unsigned long bus_base; int ret; void *addr; 
*virtual = NULL; - ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size); - if (ret || bus_size == 0) + ret = ttm_mem_io_reserve(bdev, mem); + if (ret || !mem->bus.is_iomem) return ret; - if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) - addr = (void *)(((u8 *) man->io_addr) + bus_offset); - else { + if (mem->bus.addr) { + addr = mem->bus.addr; + } else { if (mem->placement & TTM_PL_FLAG_WC) - addr = ioremap_wc(bus_base + bus_offset, bus_size); + addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size); else - addr = ioremap_nocache(bus_base + bus_offset, bus_size); - if (!addr) + addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); + if (!addr) { + ttm_mem_io_free(bdev, mem); return -ENOMEM; + } } *virtual = addr; return 0; @@ -116,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, man = &bdev->man[mem->mem_type]; - if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) + if (virtual && mem->bus.addr == NULL) iounmap(virtual); + ttm_mem_io_free(bdev, mem); } static int ttm_copy_io_page(void *dst, void *src, unsigned long page) @@ -207,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, } int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool no_wait, struct ttm_mem_reg *new_mem) + bool evict, bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; @@ -368,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) EXPORT_SYMBOL(ttm_io_prot); static int ttm_bo_ioremap(struct ttm_buffer_object *bo, - unsigned long bus_base, - unsigned long bus_offset, - unsigned long bus_size, + unsigned long offset, + unsigned long size, struct ttm_bo_kmap_obj *map) { - struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg *mem = &bo->mem; - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) { + if (bo->mem.bus.addr) { map->bo_kmap_type = ttm_bo_map_premapped; - map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); + map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); } else { map->bo_kmap_type = ttm_bo_map_iomap; if (mem->placement & TTM_PL_FLAG_WC) - map->virtual = ioremap_wc(bus_base + bus_offset, - bus_size); + map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset, + size); else - map->virtual = ioremap_nocache(bus_base + bus_offset, - bus_size); + map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset, + size); } return (!map->virtual) ? 
-ENOMEM : 0; } @@ -440,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { + unsigned long offset, size; int ret; - unsigned long bus_base; - unsigned long bus_offset; - unsigned long bus_size; BUG_ON(!list_empty(&bo->swap)); map->virtual = NULL; + map->bo = bo; if (num_pages > bo->num_pages) return -EINVAL; if (start_page > bo->num_pages) @@ -455,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) return -EPERM; #endif - ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base, - &bus_offset, &bus_size); + ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); if (ret) return ret; - if (bus_size == 0) { + if (!bo->mem.bus.is_iomem) { return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); } else { - bus_offset += start_page << PAGE_SHIFT; - bus_size = num_pages << PAGE_SHIFT; - return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); + offset = start_page << PAGE_SHIFT; + size = num_pages << PAGE_SHIFT; + return ttm_bo_ioremap(bo, offset, size, map); } } EXPORT_SYMBOL(ttm_bo_kmap); @@ -476,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) switch (map->bo_kmap_type) { case ttm_bo_map_iomap: iounmap(map->virtual); + ttm_mem_io_free(map->bo->bdev, &map->bo->mem); break; case ttm_bo_map_vmap: vunmap(map->virtual); @@ -493,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) } EXPORT_SYMBOL(ttm_bo_kunmap); -int ttm_bo_pfn_prot(struct ttm_buffer_object *bo, - unsigned long dst_offset, - unsigned long *pfn, pgprot_t *prot) -{ - struct ttm_mem_reg *mem = &bo->mem; - struct ttm_bo_device *bdev = bo->bdev; - unsigned long bus_offset; - unsigned long bus_size; - unsigned long bus_base; - int ret; - ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, - &bus_size); - if (ret) - return -EINVAL; - if (bus_size != 0) - *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT; - else - if (!bo->ttm) - return -EINVAL; - else - *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm, - dst_offset >> - PAGE_SHIFT)); - *prot = (mem->placement & TTM_PL_FLAG_CACHED) ? 
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL); - - return 0; -} - int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, void *sync_obj, void *sync_obj_arg, - bool evict, bool no_wait, + bool evict, bool no_wait_reserve, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 668dbe8b8dd3..fe6cb77899f4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct ttm_buffer_object *bo = (struct ttm_buffer_object *) vma->vm_private_data; struct ttm_bo_device *bdev = bo->bdev; - unsigned long bus_base; - unsigned long bus_offset; - unsigned long bus_size; unsigned long page_offset; unsigned long page_last; unsigned long pfn; @@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct page *page; int ret; int i; - bool is_iomem; unsigned long address = (unsigned long)vmf->virtual_address; int retval = VM_FAULT_NOPAGE; @@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_NOPAGE; } - if (bdev->driver->fault_reserve_notify) - bdev->driver->fault_reserve_notify(bo); + if (bdev->driver->fault_reserve_notify) { + ret = bdev->driver->fault_reserve_notify(bo); + switch (ret) { + case 0: + break; + case -EBUSY: + set_need_resched(); + case -ERESTARTSYS: + retval = VM_FAULT_NOPAGE; + goto out_unlock; + default: + retval = VM_FAULT_SIGBUS; + goto out_unlock; + } + } /* * Wait for buffer data in transit, due to a pipelined @@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) spin_unlock(&bo->lock); - ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, - &bus_size); - if (unlikely(ret != 0)) { + ret = ttm_mem_io_reserve(bdev, &bo->mem); + if (ret) { retval = VM_FAULT_SIGBUS; goto out_unlock; } - is_iomem = (bus_size != 0); - page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + bo->vm_node->start - vma->vm_pgoff; page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + @@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * vma->vm_page_prot when the object changes caching policy, with * the correct locks held. */ - - if (is_iomem) { + if (bo->mem.bus.is_iomem) { vma->vm_page_prot = ttm_io_prot(bo->mem.placement, vma->vm_page_prot); } else { @@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) */ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { - - if (is_iomem) - pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + - page_offset; + if (bo->mem.bus.is_iomem) + pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; else { page = ttm_tt_get_page(ttm, page_offset); if (unlikely(!page && i == 0)) { @@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) retval = (ret == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS; goto out_unlock; - } address += PAGE_SIZE; @@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma) static void ttm_bo_vm_close(struct vm_area_struct *vma) { - struct ttm_buffer_object *bo = - (struct ttm_buffer_object *)vma->vm_private_data; + struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_unref(&bo); vma->vm_private_data = NULL; diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index 3d172ef04ee1..de41e55a944a 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c @@ -204,7 +204,6 @@ static int __ttm_vt_unlock(struct ttm_lock *lock) lock->flags &= ~TTM_VT_LOCK; wake_up_all(&lock->queue); spin_unlock(&lock->lock); - printk(KERN_INFO TTM_PFX "vt unlock.\n"); return ret; } @@ -265,10 +264,8 @@ int ttm_vt_lock(struct ttm_lock *lock, ttm_lock_type, &ttm_vt_lock_remove, NULL); if (ret) (void)__ttm_vt_unlock(lock); - else { + else lock->vt_holder = tfile; - printk(KERN_INFO TTM_PFX "vt lock.\n"); - } return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index eb143e04d402..e70ddd82dc02 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -27,11 +27,13 @@ #include "ttm/ttm_memory.h" #include "ttm/ttm_module.h" +#include "ttm/ttm_page_alloc.h" #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/module.h> +#include <linux/slab.h> #define TTM_MEMORY_ALLOC_RETRIES 4 @@ -260,8 +262,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, zone->used_mem = 0; zone->glob = glob; glob->zone_kernel = zone; - kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); - ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); + ret = kobject_init_and_add( + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; @@ -296,8 +298,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, zone->used_mem = 0; zone->glob = glob; glob->zone_highmem = zone; - kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); - ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); + ret = kobject_init_and_add( + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; @@ -343,8 +345,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, zone->used_mem = 0; zone->glob = glob; glob->zone_dma32 = zone; - kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); - ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); + ret = kobject_init_and_add( + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; @@ -365,10 +367,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) glob->swap_queue = create_singlethread_workqueue("ttm_swap"); INIT_WORK(&glob->work, ttm_shrink_work); init_waitqueue_head(&glob->queue); - kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type); - ret = kobject_add(&glob->kobj, - ttm_get_kobj(), - "memory_accounting"); + ret = kobject_init_and_add( + &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); if (unlikely(ret != 0)) { kobject_put(&glob->kobj); return ret; @@ -394,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) "Zone %7s: Available graphics memory: %llu kiB.\n", zone->name, (unsigned long long) zone->max_mem >> 10); } + ttm_page_alloc_init(glob, 
glob->zone_kernel->max_mem/(2*PAGE_SIZE)); return 0; out_no_zone: ttm_mem_global_release(glob); @@ -406,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) unsigned int i; struct ttm_mem_zone *zone; + /* let the page allocator first stop the shrink work. */ + ttm_page_alloc_fini(); + flush_workqueue(glob->swap_queue); destroy_workqueue(glob->swap_queue); glob->swap_queue = NULL; @@ -413,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) zone = glob->zones[i]; kobject_del(&zone->kobj); kobject_put(&zone->kobj); - } + } kobject_del(&glob->kobj); kobject_put(&glob->kobj); } diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c new file mode 100644 index 000000000000..ca904799f018 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -0,0 +1,855 @@ +/* + * Copyright (c) Red Hat Inc. + + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie <airlied@redhat.com> + * Jerome Glisse <jglisse@redhat.com> + * Pauli Nieminen <suokkos@gmail.com> + */ + +/* simple list based uncached page pool + * - Pool collects recently freed pages for reuse + * - Use page->lru to keep a free list + * - doesn't track pages currently in use + */ +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/highmem.h> +#include <linux/mm_types.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/seq_file.h> /* for seq_printf */ +#include <linux/slab.h> + +#include <asm/atomic.h> + +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_page_alloc.h" + +#ifdef TTM_HAS_AGP +#include <asm/agp.h> +#endif + +#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) +#define SMALL_ALLOCATION 16 +#define FREE_ALL_PAGES (~0U) +/* times are in msecs */ +#define PAGE_FREE_INTERVAL 1000 + +/** + * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. + * + * @lock: Protects the shared pool from concurrent access. Must be used with + * irqsave/irqrestore variants because the pool allocator may be called from + * delayed work. + * @fill_lock: Prevent concurrent calls to fill. + * @list: Pool of free uc/wc pages for fast reuse. + * @gfp_flags: Flags to pass for alloc_page. + * @npages: Number of pages in pool. + */ +struct ttm_page_pool { + spinlock_t lock; + bool fill_lock; + struct list_head list; + int gfp_flags; + unsigned npages; + char *name; + unsigned long nfrees; + unsigned long nrefills; +}; + +/** + * Limits for the pool.
They are handled without locks because the only place where + they may change is the sysfs store. They won't have immediate effect anyway + so forcing serialization to access them is pointless. + */ + +struct ttm_pool_opts { + unsigned alloc_size; + unsigned max_size; + unsigned small; +}; + +#define NUM_POOLS 4 + +/** + * struct ttm_pool_manager - Holds memory pools for fast allocation + * + * Manager is a read-only object for the pool code so it doesn't need locking. + * + * @kobj: sysfs kobject under which the pool tunables are exposed. + * @mm_shrink: shrinker registered so the mm can reclaim pool pages. + * @options: tunable limits for the pools (see struct ttm_pool_opts). + * + * @pools: All pool objects in use. + **/ +struct ttm_pool_manager { + struct kobject kobj; + struct shrinker mm_shrink; + struct ttm_pool_opts options; + + union { + struct ttm_page_pool pools[NUM_POOLS]; + struct { + struct ttm_page_pool wc_pool; + struct ttm_page_pool uc_pool; + struct ttm_page_pool wc_pool_dma32; + struct ttm_page_pool uc_pool_dma32; + } ; + }; +}; + +static struct attribute ttm_page_pool_max = { + .name = "pool_max_size", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_page_pool_small = { + .name = "pool_small_allocation", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_page_pool_alloc_size = { + .name = "pool_allocation_size", + .mode = S_IRUGO | S_IWUSR +}; + +static struct attribute *ttm_pool_attrs[] = { + &ttm_page_pool_max, + &ttm_page_pool_small, + &ttm_page_pool_alloc_size, + NULL +}; + +static void ttm_pool_kobj_release(struct kobject *kobj) +{ + struct ttm_pool_manager *m = + container_of(kobj, struct ttm_pool_manager, kobj); + kfree(m); +} + +static ssize_t ttm_pool_store(struct kobject *kobj, + struct attribute *attr, const char *buffer, size_t size) +{ + struct ttm_pool_manager *m = + container_of(kobj, struct ttm_pool_manager, kobj); + int chars; + unsigned val; + chars = sscanf(buffer, "%u", &val); + if (chars == 0) + return size; + + /* Convert kb to number of pages */ + val = val / (PAGE_SIZE >> 10); + + if (attr == &ttm_page_pool_max) + m->options.max_size = val; + else if (attr == &ttm_page_pool_small) + m->options.small = val; + else if (attr == &ttm_page_pool_alloc_size) { + if (val > NUM_PAGES_TO_ALLOC*8) { + printk(KERN_ERR TTM_PFX + "Setting allocation size to %lu " + "is not allowed. Recommended size is " + "%lu\n", + NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), + NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); + return size; + } else if (val > NUM_PAGES_TO_ALLOC) { + printk(KERN_WARNING TTM_PFX + "Setting allocation size to " + "larger than %lu is not recommended.\n", + NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); + } + m->options.alloc_size = val; + } + + return size; +} + +static ssize_t ttm_pool_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct ttm_pool_manager *m = + container_of(kobj, struct ttm_pool_manager, kobj); + unsigned val = 0; + + if (attr == &ttm_page_pool_max) + val = m->options.max_size; + else if (attr == &ttm_page_pool_small) + val = m->options.small; + else if (attr == &ttm_page_pool_alloc_size) + val = m->options.alloc_size; + + val = val * (PAGE_SIZE >> 10); + + return snprintf(buffer, PAGE_SIZE, "%u\n", val); +} + +static const struct sysfs_ops ttm_pool_sysfs_ops = { + .show = &ttm_pool_show, + .store = &ttm_pool_store, +}; + +static struct kobj_type ttm_pool_kobj_type = { + .release = &ttm_pool_kobj_release, + .sysfs_ops = &ttm_pool_sysfs_ops, + .default_attrs = ttm_pool_attrs, +}; + +static struct ttm_pool_manager *_manager; + +#ifndef CONFIG_X86 +static int set_pages_array_wb(struct page **pages, int addrinarray) +{ +#ifdef TTM_HAS_AGP + int i; + + for (i = 0; i < addrinarray; i++) + unmap_page_from_agp(pages[i]); +#endif + return 0; +} + +static int set_pages_array_wc(struct page **pages, int addrinarray) +{ +#ifdef TTM_HAS_AGP + int i; + + for (i = 0; i < addrinarray; i++) + map_page_into_agp(pages[i]); +#endif + return 0; +} + +static int set_pages_array_uc(struct page **pages, int addrinarray) +{ +#ifdef TTM_HAS_AGP + int i; + + for (i = 0; i < addrinarray; i++) + map_page_into_agp(pages[i]); +#endif + return 0; +} +#endif + +/** + * Select the right pool for the requested caching state and ttm flags. */ +static struct ttm_page_pool *ttm_get_pool(int flags, + enum ttm_caching_state cstate) +{ + int pool_index; + + if (cstate == tt_cached) + return NULL; + + if (cstate == tt_wc) + pool_index = 0x0; + else + pool_index = 0x1; + + if (flags & TTM_PAGE_FLAG_DMA32) + pool_index |= 0x2; + + return &_manager->pools[pool_index]; +} + +/* set memory back to wb and free the pages. */ +static void ttm_pages_put(struct page *pages[], unsigned npages) +{ + unsigned i; + if (set_pages_array_wb(pages, npages)) + printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", + npages); + for (i = 0; i < npages; ++i) + __free_page(pages[i]); +} + +static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, + unsigned freed_pages) +{ + pool->npages -= freed_pages; + pool->nfrees += freed_pages; +} + +/** + * Free pages from pool. + * + * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC + * pages in one go.
+ * + * @pool: pool to free the pages from + * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool + **/ +static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) +{ + unsigned long irq_flags; + struct page *p; + struct page **pages_to_free; + unsigned freed_pages = 0, + npages_to_free = nr_free; + + if (NUM_PAGES_TO_ALLOC < nr_free) + npages_to_free = NUM_PAGES_TO_ALLOC; + + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), + GFP_KERNEL); + if (!pages_to_free) { + printk(KERN_ERR TTM_PFX + "Failed to allocate memory for pool free operation.\n"); + return 0; + } + +restart: + spin_lock_irqsave(&pool->lock, irq_flags); + + list_for_each_entry_reverse(p, &pool->list, lru) { + if (freed_pages >= npages_to_free) + break; + + pages_to_free[freed_pages++] = p; + /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ + if (freed_pages >= NUM_PAGES_TO_ALLOC) { + /* remove range of pages from the pool */ + __list_del(p->lru.prev, &pool->list); + + ttm_pool_update_free_locked(pool, freed_pages); + /** + * Because changing page caching is costly + * we unlock the pool to prevent stalling. + */ + spin_unlock_irqrestore(&pool->lock, irq_flags); + + ttm_pages_put(pages_to_free, freed_pages); + if (likely(nr_free != FREE_ALL_PAGES)) + nr_free -= freed_pages; + + if (NUM_PAGES_TO_ALLOC >= nr_free) + npages_to_free = nr_free; + else + npages_to_free = NUM_PAGES_TO_ALLOC; + + freed_pages = 0; + + /* free all so restart the processing */ + if (nr_free) + goto restart; + + /* Not allowed to fall through or break because + * following context is inside spinlock while we are + * outside here. + */ + goto out; + + } + } + + /* remove range of pages from the pool */ + if (freed_pages) { + __list_del(&p->lru, &pool->list); + + ttm_pool_update_free_locked(pool, freed_pages); + nr_free -= freed_pages; + } + + spin_unlock_irqrestore(&pool->lock, irq_flags); + + if (freed_pages) + ttm_pages_put(pages_to_free, freed_pages); +out: + kfree(pages_to_free); + return nr_free; +} + +/* Get a good estimate of how many pages are free in the pools */ +static int ttm_pool_get_num_unused_pages(void) +{ + unsigned i; + int total = 0; + for (i = 0; i < NUM_POOLS; ++i) + total += _manager->pools[i].npages; + + return total; +} + +/** + * Callback for mm to request pool to reduce the number of pages held.
+ */ +static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) +{ + static atomic_t start_pool = ATOMIC_INIT(0); + unsigned i; + unsigned pool_offset = atomic_add_return(1, &start_pool); + struct ttm_page_pool *pool; + + pool_offset = pool_offset % NUM_POOLS; + /* select start pool in round robin fashion */ + for (i = 0; i < NUM_POOLS; ++i) { + unsigned nr_free = shrink_pages; + if (shrink_pages == 0) + break; + pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; + shrink_pages = ttm_page_pool_free(pool, nr_free); + } + /* return estimated number of unused pages in pool */ + return ttm_pool_get_num_unused_pages(); +} + +static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) +{ + manager->mm_shrink.shrink = &ttm_pool_mm_shrink; + manager->mm_shrink.seeks = 1; + register_shrinker(&manager->mm_shrink); +} + +static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) +{ + unregister_shrinker(&manager->mm_shrink); +} + +static int ttm_set_pages_caching(struct page **pages, + enum ttm_caching_state cstate, unsigned cpages) +{ + int r = 0; + /* Set page caching */ + switch (cstate) { + case tt_uncached: + r = set_pages_array_uc(pages, cpages); + if (r) + printk(KERN_ERR TTM_PFX + "Failed to set %d pages to uc!\n", + cpages); + break; + case tt_wc: + r = set_pages_array_wc(pages, cpages); + if (r) + printk(KERN_ERR TTM_PFX + "Failed to set %d pages to wc!\n", + cpages); + break; + default: + break; + } + return r; +} + +/** + * Free the pages that failed to change the caching state. If there are + * any pages that have already changed their caching state, put them back to + * the pool. + */ +static void ttm_handle_caching_state_failure(struct list_head *pages, + int ttm_flags, enum ttm_caching_state cstate, + struct page **failed_pages, unsigned cpages) +{ + unsigned i; + /* Failed pages have to be freed */ + for (i = 0; i < cpages; ++i) { + list_del(&failed_pages[i]->lru); + __free_page(failed_pages[i]); + } +} + +/** + * Allocate new pages with correct caching. + * + * This function is reentrant if the caller updates count depending on the + * number of pages returned in the pages array.
+ */ +static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, + int ttm_flags, enum ttm_caching_state cstate, unsigned count) +{ + struct page **caching_array; + struct page *p; + int r = 0; + unsigned i, cpages; + unsigned max_cpages = min(count, + (unsigned)(PAGE_SIZE/sizeof(struct page *))); + + /* allocate array for page caching change */ + caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); + + if (!caching_array) { + printk(KERN_ERR TTM_PFX + "Unable to allocate table for new pages."); + return -ENOMEM; + } + + for (i = 0, cpages = 0; i < count; ++i) { + p = alloc_page(gfp_flags); + + if (!p) { + printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); + + /* store already allocated pages in the pool after + * setting the caching state */ + if (cpages) { + r = ttm_set_pages_caching(caching_array, + cstate, cpages); + if (r) + ttm_handle_caching_state_failure(pages, + ttm_flags, cstate, + caching_array, cpages); + } + r = -ENOMEM; + goto out; + } + +#ifdef CONFIG_HIGHMEM + /* gfp flags of highmem page should never be dma32 so + * we should be fine in such case + */ + if (!PageHighMem(p)) +#endif + { + caching_array[cpages++] = p; + if (cpages == max_cpages) { + + r = ttm_set_pages_caching(caching_array, + cstate, cpages); + if (r) { + ttm_handle_caching_state_failure(pages, + ttm_flags, cstate, + caching_array, cpages); + goto out; + } + cpages = 0; + } + } + + list_add(&p->lru, pages); + } + + if (cpages) { + r = ttm_set_pages_caching(caching_array, cstate, cpages); + if (r) + ttm_handle_caching_state_failure(pages, + ttm_flags, cstate, + caching_array, cpages); + } +out: + kfree(caching_array); + + return r; +} + +/** + * Fill the given pool if there aren't enough pages and the requested number + * of pages is small. + */ +static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, + int ttm_flags, enum ttm_caching_state cstate, unsigned count, + unsigned long *irq_flags) +{ + struct page *p; + int r; + unsigned cpages = 0; + /** + * Only allow one pool fill operation at a time. + * If pool doesn't have enough pages for the allocation new pages are + * allocated from outside of pool. + */ + if (pool->fill_lock) + return; + + pool->fill_lock = true; + + /* If the allocation request is small and there are not enough + * pages in the pool we fill the pool first */ + if (count < _manager->options.small + && count > pool->npages) { + struct list_head new_pages; + unsigned alloc_size = _manager->options.alloc_size; + + /** + * Can't change page caching if in irqsave context. We have to + * drop the pool->lock. + */ + spin_unlock_irqrestore(&pool->lock, *irq_flags); + + INIT_LIST_HEAD(&new_pages); + r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, + cstate, alloc_size); + spin_lock_irqsave(&pool->lock, *irq_flags); + + if (!r) { + list_splice(&new_pages, &pool->list); + ++pool->nrefills; + pool->npages += alloc_size; + } else { + printk(KERN_ERR TTM_PFX + "Failed to fill pool (%p).", pool); + /* If we have any pages left put them to the pool. */ + list_for_each_entry(p, &pool->list, lru) { + ++cpages; + } + list_splice(&new_pages, &pool->list); + pool->npages += cpages; + } + + } + pool->fill_lock = false; +} + +/** + * Cut count number of pages from the pool and put them on the return list + * + * @return count of pages still to allocate to fill the request.
+ */ +static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, + struct list_head *pages, int ttm_flags, + enum ttm_caching_state cstate, unsigned count) +{ + unsigned long irq_flags; + struct list_head *p; + unsigned i; + + spin_lock_irqsave(&pool->lock, irq_flags); + ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); + + if (count >= pool->npages) { + /* take all pages from the pool */ + list_splice_init(&pool->list, pages); + count -= pool->npages; + pool->npages = 0; + goto out; + } + /* find the last page to include for the requested number of pages. + * Split the pool into two halves to reduce the search space. */ + if (count <= pool->npages/2) { + i = 0; + list_for_each(p, &pool->list) { + if (++i == count) + break; + } + } else { + i = pool->npages + 1; + list_for_each_prev(p, &pool->list) { + if (--i == count) + break; + } + } + /* Cut count number of pages from pool */ + list_cut_position(pages, &pool->list, p); + pool->npages -= count; + count = 0; +out: + spin_unlock_irqrestore(&pool->lock, irq_flags); + return count; +} + +/* + * On success the pages list will hold count correctly + * cached pages. + */ +int ttm_get_pages(struct list_head *pages, int flags, + enum ttm_caching_state cstate, unsigned count) +{ + struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); + struct page *p = NULL; + int gfp_flags = GFP_USER; + int r; + + /* set zero flag for page allocation if required */ + if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) + gfp_flags |= __GFP_ZERO; + + /* No pool for cached pages */ + if (pool == NULL) { + if (flags & TTM_PAGE_FLAG_DMA32) + gfp_flags |= GFP_DMA32; + else + gfp_flags |= GFP_HIGHUSER; + + for (r = 0; r < count; ++r) { + p = alloc_page(gfp_flags); + if (!p) { + + printk(KERN_ERR TTM_PFX + "Unable to allocate page."); + return -ENOMEM; + } + + list_add(&p->lru, pages); + } + return 0; + } + + + /* combine zero flag to pool flags */ + gfp_flags |= pool->gfp_flags; + + /* First we take pages from the pool */ + count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count); + + /* clear the pages coming from the pool if requested */ + if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { + list_for_each_entry(p, pages, lru) { + clear_page(page_address(p)); + } + } + + /* If the pool didn't have enough pages, allocate new ones. */ + if (count > 0) { + /* ttm_alloc_new_pages doesn't reference pool so we can run + * multiple requests in parallel. + **/ + r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count); + if (r) { + /* If there are any pages in the list, put them back to + * the pool.
*/ + printk(KERN_ERR TTM_PFX + "Failed to allocate extra pages " + "for large request."); + ttm_put_pages(pages, 0, flags, cstate); + return r; + } + } + + + return 0; +} + +/* Put all pages in the pages list into the correct pool to wait for reuse */ +void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, + enum ttm_caching_state cstate) +{ + unsigned long irq_flags; + struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); + struct page *p, *tmp; + + if (pool == NULL) { + /* No pool for this memory type so free the pages */ + + list_for_each_entry_safe(p, tmp, pages, lru) { + __free_page(p); + } + /* Make the pages list empty */ + INIT_LIST_HEAD(pages); + return; + } + if (page_count == 0) { + list_for_each_entry_safe(p, tmp, pages, lru) { + ++page_count; + } + } + + spin_lock_irqsave(&pool->lock, irq_flags); + list_splice_init(pages, &pool->list); + pool->npages += page_count; + /* Check that we don't go over the pool limit */ + page_count = 0; + if (pool->npages > _manager->options.max_size) { + page_count = pool->npages - _manager->options.max_size; + /* free at least NUM_PAGES_TO_ALLOC number of pages + * to reduce calls to set_memory_wb */ + if (page_count < NUM_PAGES_TO_ALLOC) + page_count = NUM_PAGES_TO_ALLOC; + } + spin_unlock_irqrestore(&pool->lock, irq_flags); + if (page_count) + ttm_page_pool_free(pool, page_count); +} + +static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, + char *name) +{ + spin_lock_init(&pool->lock); + pool->fill_lock = false; + INIT_LIST_HEAD(&pool->list); + pool->npages = pool->nfrees = 0; + pool->gfp_flags = flags; + pool->name = name; +} + +int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) +{ + int ret; + + WARN_ON(_manager); + + printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); + + _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); + if (!_manager) + return -ENOMEM; + + ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); + + ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc"); + + ttm_page_pool_init_locked(&_manager->wc_pool_dma32, + GFP_USER | GFP_DMA32, "wc dma"); + + ttm_page_pool_init_locked(&_manager->uc_pool_dma32, + GFP_USER | GFP_DMA32, "uc dma"); + + _manager->options.max_size = max_pages; + _manager->options.small = SMALL_ALLOCATION; + _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; + + ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, + &glob->kobj, "pool"); + if (unlikely(ret != 0)) { + kobject_put(&_manager->kobj); + _manager = NULL; + return ret; + } + + ttm_pool_mm_shrink_init(_manager); + + return 0; +} + +void ttm_page_alloc_fini(void) +{ + int i; + + printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); + ttm_pool_mm_shrink_fini(_manager); + + for (i = 0; i < NUM_POOLS; ++i) + ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); + + kobject_put(&_manager->kobj); + _manager = NULL; +} + +int ttm_page_alloc_debugfs(struct seq_file *m, void *data) +{ + struct ttm_page_pool *p; + unsigned i; + char *h[] = {"pool", "refills", "pages freed", "size"}; + if (!_manager) { + seq_printf(m, "No pool allocator running.\n"); + return 0; + } + seq_printf(m, "%6s %12s %13s %8s\n", + h[0], h[1], h[2], h[3]); + for (i = 0; i < NUM_POOLS; ++i) { + p = &_manager->pools[i]; + + seq_printf(m, "%6s %12ld %13ld %8d\n", + p->name, p->nrefills, + p->nfrees, p->npages); + } + return 0; +} +EXPORT_SYMBOL(ttm_page_alloc_debugfs); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a759170763bb..a7bab87a548b 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -28,65 +28,35 @@ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ -#include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/swap.h> +#include <linux/slab.h> #include "drm_cache.h" +#include "drm_mem_util.h" #include "ttm/ttm_module.h" #include "ttm/ttm_bo_driver.h" #include "ttm/ttm_placement.h" +#include "ttm/ttm_page_alloc.h" static int ttm_tt_swapin(struct ttm_tt *ttm); /** * Allocates storage for pointers to the pages that back the ttm. - * - * Uses kmalloc if possible. Otherwise falls back to vmalloc. */ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) { - unsigned long size = ttm->num_pages * sizeof(*ttm->pages); - ttm->pages = NULL; - - if (size <= PAGE_SIZE) - ttm->pages = kzalloc(size, GFP_KERNEL); - - if (!ttm->pages) { - ttm->pages = vmalloc_user(size); - if (ttm->pages) - ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC; - } + ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); } static void ttm_tt_free_page_directory(struct ttm_tt *ttm) { - if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) { - vfree(ttm->pages); - ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC; - } else { - kfree(ttm->pages); - } + drm_free_large(ttm->pages); ttm->pages = NULL; } -static struct page *ttm_tt_alloc_page(unsigned page_flags) -{ - gfp_t gfp_flags = GFP_USER; - - if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - - if (page_flags & TTM_PAGE_FLAG_DMA32) - gfp_flags |= __GFP_DMA32; - else - gfp_flags |= __GFP_HIGHMEM; - - return alloc_page(gfp_flags); -} - static void ttm_tt_free_user_pages(struct ttm_tt *ttm) { int write; @@ -127,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) { struct page *p; + struct list_head h; struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; int ret; while (NULL == (p = ttm->pages[index])) { - p = ttm_tt_alloc_page(ttm->page_flags); - if (!p) + INIT_LIST_HEAD(&h); + + ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1); + + if (ret != 0) return NULL; + p = list_first_entry(&h, struct page, lru); + ret = ttm_mem_global_alloc_page(mem_glob, p, false, false); if (unlikely(ret != 0)) goto out_err; @@ -244,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, if (ttm->caching_state == c_state) return 0; - if (c_state != tt_cached) { - ret = ttm_tt_populate(ttm); - if (unlikely(ret != 0)) - return ret; + if (ttm->state == tt_unpopulated) { + /* Change caching but don't populate */ + ttm->caching_state = c_state; + return 0; } if (ttm->caching_state == tt_cached) @@ -298,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching); static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) { int i; + unsigned count = 0; + struct list_head h; struct page *cur_page; struct ttm_backend *be = ttm->be; + INIT_LIST_HEAD(&h); + if (be) be->func->clear(be); - (void)ttm_tt_set_caching(ttm, tt_cached); for (i = 0; i < ttm->num_pages; ++i) { + cur_page = ttm->pages[i]; ttm->pages[i] = NULL; if (cur_page) { @@ -314,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) "Leaking pages.\n"); ttm_mem_global_free_page(ttm->glob->mem_glob, cur_page); - __free_page(cur_page); + list_add(&cur_page->lru, &h); + count++; } } + ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state); ttm->state = tt_unpopulated; ttm->first_himem_page = ttm->num_pages; ttm->last_lomem_page = -1; diff --git 
a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 327380888b4a..4c54f043068e 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -40,6 +40,7 @@ #include "via_dmablit.h" #include <linux/pagemap.h> +#include <linux/slab.h> #define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) #define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c index 6ec04ac12459..6efac8117c93 100644 --- a/drivers/gpu/drm/via/via_video.c +++ b/drivers/gpu/drm/via/via_video.c @@ -75,7 +75,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_ DRM_DEBUG("\n"); - if (fx->lock > VIA_NR_XVMC_LOCKS) + if (fx->lock >= VIA_NR_XVMC_LOCKS) return -EFAULT; lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock); diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index f20b8bcbef39..30ad13344f7b 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -1,6 +1,6 @@ config DRM_VMWGFX tristate "DRM driver for VMware Virtual GPU" - depends on DRM && PCI + depends on DRM && PCI && FB select FB_DEFERRED_IO select FB_CFB_FILLRECT select FB_CFB_COPYAREA diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 1a3cb6816d1c..4505e17df3f5 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ - vmwgfx_overlay.o + vmwgfx_overlay.o vmwgfx_fence.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 825ebe3d89d5..c4f5114aee7c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { - struct vmw_private *dev_priv = - container_of(bdev, struct vmw_private, bdev); - switch (type) { case TTM_PL_SYSTEM: /* System memory */ @@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case TTM_PL_VRAM: /* "On-card" video ram */ man->gpu_offset = 0; - man->io_offset = dev_priv->vram_start; - man->io_size = dev_priv->vram_size; - man->flags = TTM_MEMTYPE_FLAG_FIXED | - TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE; - man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_WC; break; @@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo) vmw_dmabuf_gmr_unbind(bo); } +static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); + + mem->bus.addr = NULL; + mem->bus.is_iomem = false; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* System memory */ + return 0; + case TTM_PL_VRAM: + mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + 
mem->bus.base = dev_priv->vram_start; + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + return 0; +} + +static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ +} + +static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) +{ + return 0; +} + /** * FIXME: We're using the old vmware polling method to sync. * Do this with fences instead. @@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = { .sync_obj_unref = vmw_sync_obj_unref, .sync_obj_ref = vmw_sync_obj_ref, .move_notify = vmw_move_notify, - .swap_notify = vmw_swap_notify + .swap_notify = vmw_swap_notify, + .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, + .io_mem_reserve = &vmw_ttm_io_mem_reserve, + .io_mem_free = &vmw_ttm_io_mem_free, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0c9c0811f42d..b793c8c9acb3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -88,6 +88,9 @@ #define DRM_IOCTL_VMW_FENCE_WAIT \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ struct drm_vmw_fence_wait_arg) +#define DRM_IOCTL_VMW_UPDATE_LAYOUT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ + struct drm_vmw_update_layout_arg) /** @@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, - DRM_AUTH | DRM_UNLOCKED) + DRM_AUTH | DRM_UNLOCKED), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, + DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) }; static struct pci_device_id vmw_pci_id_list[] = { @@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_err3; } + /* Need mmio memory to check for fifo pitchlock cap. */ + if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && + !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && + !vmw_fifo_have_pitchlock(dev_priv)) { + ret = -ENOSYS; + DRM_ERROR("Hardware has no pitchlock\n"); + goto out_err4; + } + dev_priv->tdev = ttm_object_device_init (dev_priv->mem_global_ref.object, 12); @@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); - DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); - unregister_pm_notifier(&dev_priv->pm_nb); vmw_fb_close(dev_priv); @@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev, { struct vmw_master *vmaster; - DRM_INFO("Master create.\n"); vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); if (unlikely(vmaster == NULL)) return -ENOMEM; @@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev, { struct vmw_master *vmaster = vmw_master(master); - DRM_INFO("Master destroy.\n"); master->driver_priv = NULL; kfree(vmaster); } @@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev, struct vmw_master *vmaster = vmw_master(file_priv->master); int ret = 0; - DRM_INFO("Master set.\n"); - if (active) { BUG_ON(active != &dev_priv->fbdev_master); ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); @@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev, struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; - DRM_INFO("Master drop.\n"); - /** * Make sure the master doesn't disappear while we have * it locked. 
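For reference, the reworked ttm_bo_vm_fault() earlier in this series gives the new fault_reserve_notify hook a small return-code contract: 0 proceeds with the fault, -EBUSY backs off and reschedules, -ERESTARTSYS retries the fault, and anything else raises SIGBUS. vmwgfx above wires in a stub that always returns 0; the following is only a minimal sketch of a non-trivial implementation under that contract, where the example_private type and the busy test are hypothetical, not part of this patch:

static int example_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	/* Assumes a driver that embeds its ttm_bo_device, as vmwgfx does. */
	struct example_private *priv =
		container_of(bo->bdev, struct example_private, bdev);

	/* Ask the fault handler to back off while the engine is busy;
	 * ttm_bo_vm_fault() then calls set_need_resched() and returns
	 * VM_FAULT_NOPAGE so the fault is retried later. */
	if (example_engine_is_busy(priv))
		return -EBUSY;

	/* Safe to populate PTEs for this buffer object. */
	return 0;
}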
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 356dc935ec13..eaad52095339 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -41,12 +41,13 @@ #define VMWGFX_DRIVER_DATE "20100209" #define VMWGFX_DRIVER_MAJOR 1 -#define VMWGFX_DRIVER_MINOR 0 +#define VMWGFX_DRIVER_MINOR 2 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 #define VMWGFX_MAX_GMRS 2048 +#define VMWGFX_MAX_DISPLAYS 16 struct vmw_fpriv { struct drm_master *locked_master; @@ -102,6 +103,13 @@ struct vmw_surface { struct vmw_cursor_snooper snooper; }; +struct vmw_fence_queue { + struct list_head head; + struct timespec lag; + struct timespec lag_time; + spinlock_t lock; +}; + struct vmw_fifo_state { unsigned long reserved_size; __le32 *dynamic_buffer; @@ -115,6 +123,7 @@ struct vmw_fifo_state { uint32_t capabilities; struct mutex fifo_mutex; struct rw_semaphore rwsem; + struct vmw_fence_queue fence_queue; }; struct vmw_relocation { @@ -144,6 +153,14 @@ struct vmw_master { struct ttm_lock lock; }; +struct vmw_vga_topology_state { + uint32_t width; + uint32_t height; + uint32_t primary; + uint32_t pos_x; + uint32_t pos_y; +}; + struct vmw_private { struct ttm_bo_device bdev; struct ttm_bo_global_ref bo_global_ref; @@ -171,14 +188,19 @@ struct vmw_private { * VGA registers. */ + struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; uint32_t vga_width; uint32_t vga_height; uint32_t vga_depth; uint32_t vga_bpp; uint32_t vga_pseudo; uint32_t vga_red_mask; - uint32_t vga_blue_mask; uint32_t vga_green_mask; + uint32_t vga_blue_mask; + uint32_t vga_bpl; + uint32_t vga_pitchlock; + + uint32_t num_displays; /* * Framebuffer info. 
@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); +extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); /** * TTM glue - vmwgfx_ttm_glue.c @@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, uint32_t sequence, bool interruptible, unsigned long timeout); +extern void vmw_update_sequence(struct vmw_private *dev_priv, + struct vmw_fifo_state *fifo_state); + + +/** + * Rudimentary fence objects currently used only for throttling - + * vmwgfx_fence.c + */ + +extern void vmw_fence_queue_init(struct vmw_fence_queue *queue); +extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue); +extern int vmw_fence_push(struct vmw_fence_queue *queue, + uint32_t sequence); +extern int vmw_fence_pull(struct vmw_fence_queue *queue, + uint32_t signaled_sequence); +extern int vmw_wait_lag(struct vmw_private *dev_priv, + struct vmw_fence_queue *queue, uint32_t us); /** * Kernel framebuffer - vmwgfx_fb.c @@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, struct ttm_object_file *tfile, struct ttm_buffer_object *bo, SVGA3dCmdHeader *header); +void vmw_kms_write_svga(struct vmw_private *vmw_priv, + unsigned width, unsigned height, unsigned pitch, + unsigned bbp, unsigned depth); +int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /** * Overlay control - vmwgfx_overlay.c diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 0897359b3e4e..8e396850513c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, * Put BO in VRAM, only if there is space. */ - ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); + ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false); if (unlikely(ret == -ERESTARTSYS)) return ret; @@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, * previous contents. 
*/ - ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); + ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); return ret; } @@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, ret = copy_from_user(cmd, user_cmd, arg->command_size); if (unlikely(ret != 0)) { + ret = -EFAULT; DRM_ERROR("Failed copying commands.\n"); goto out_commit; } @@ -669,6 +670,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, goto out_err; vmw_apply_relocations(sw_context); + + if (arg->throttle_us) { + ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue, + arg->throttle_us); + + if (unlikely(ret != 0)) + goto out_err; + } + vmw_fifo_commit(dev_priv, arg->command_size); ret = vmw_fifo_send_fence(dev_priv, &sequence); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index a93367041cdc..b0866f04ec76 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, return -EINVAL; } - /* without multimon its hard to resize */ - if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && - (var->xres != par->max_width || - var->yres != par->max_height)) { - DRM_ERROR("Tried to resize, but we don't have multimon\n"); + if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && + (var->xoffset != 0 || var->yoffset != 0)) { + DRM_ERROR("Can not handle panning without display topology\n"); return -EINVAL; } - if (var->xres > par->max_width || - var->yres > par->max_height) { + if ((var->xoffset + var->xres) > par->max_width || + (var->yoffset + var->yres) > par->max_height) { DRM_ERROR("Requested geom can not fit in framebuffer\n"); return -EINVAL; } @@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info) struct vmw_fb_par *par = info->par; struct vmw_private *vmw_priv = par->vmw_priv; - if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { - vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); - - vmw_write(vmw_priv, SVGA_REG_ENABLE, 1); - vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width); - vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height); - vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp); - vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth); - vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); - vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); - vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); - + vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, + info->fix.line_length, + par->bpp, par->depth); + if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { /* TODO check if pitch and offset changes */ - vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); @@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info) vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); - } else { - vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres); - vmw_write(vmw_priv, SVGA_REG_HEIGHT, 
info->var.yres); - - /* TODO check if pitch and offset changes */ } + /* This is really helpful since if this fails the user + * can probably not see anything on the screen. + */ + WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0); + return 0; } @@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv) unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; int ret; + /* XXX These shouldn't be hardcoded. */ initial_width = 800; initial_height = 600; fb_bbp = 32; fb_depth = 24; - if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { - fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); - fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); - } else { - fb_width = min(vmw_priv->fb_max_width, initial_width); - fb_height = min(vmw_priv->fb_max_height, initial_height); - } + /* XXX As shouldn't these be as well. */ + fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); + fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); initial_width = min(fb_width, initial_width); initial_height = min(fb_height, initial_height); - vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); - vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); - vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp); - vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth); - vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); - vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); - vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); - - fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE); + fb_pitch = fb_width * fb_bbp / 8; + fb_size = fb_pitch * fb_height; fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); - fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE); - - DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH)); - DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT)); - DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH)); - DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT)); - DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL)); - DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH)); - DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE)); - DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK)); - DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK)); - DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK)); - DRM_DEBUG("fb_offset 0x%08x\n", fb_offset); - DRM_DEBUG("fb_pitch %u\n", fb_pitch); - DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024); info = framebuffer_alloc(sizeof(*par), device); if (!info) @@ -559,8 +516,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv) info->pixmap.scan_align = 1; #endif - info->aperture_base = vmw_priv->vram_start; - info->aperture_size = vmw_priv->vram_size; + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto err_aper; + } + info->apertures->ranges[0].base = vmw_priv->vram_start; + info->apertures->ranges[0].size = vmw_priv->vram_size; /* * Dirty & Deferred IO @@ -580,6 +542,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) err_defio: fb_deferred_io_cleanup(info); +err_aper: ttm_bo_kunmap(&par->map); err_unref: ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); @@ -628,7 +591,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, if (unlikely(ret != 0)) return ret; - ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false); + ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); ttm_bo_unreserve(bo); return ret; @@ -652,7 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, if 
(unlikely(ret != 0)) goto err_unlock; - ret = ttm_bo_validate(bo, &ne_placement, false, false); + ret = ttm_bo_validate(bo, &ne_placement, false, false, false); + + /* Could probably bug on */ + WARN_ON(bo->offset != 0); + ttm_bo_unreserve(bo); err_unlock: ttm_write_unlock(&vmw_priv->active_master->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c new file mode 100644 index 000000000000..61eacc1b5ca3 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -0,0 +1,173 @@ +/************************************************************************** + * + * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + + +#include "vmwgfx_drv.h" + +struct vmw_fence { + struct list_head head; + uint32_t sequence; + struct timespec submitted; +}; + +void vmw_fence_queue_init(struct vmw_fence_queue *queue) +{ + INIT_LIST_HEAD(&queue->head); + queue->lag = ns_to_timespec(0); + getrawmonotonic(&queue->lag_time); + spin_lock_init(&queue->lock); +} + +void vmw_fence_queue_takedown(struct vmw_fence_queue *queue) +{ + struct vmw_fence *fence, *next; + + spin_lock(&queue->lock); + list_for_each_entry_safe(fence, next, &queue->head, head) { + kfree(fence); + } + spin_unlock(&queue->lock); +} + +int vmw_fence_push(struct vmw_fence_queue *queue, + uint32_t sequence) +{ + struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); + + if (unlikely(!fence)) + return -ENOMEM; + + fence->sequence = sequence; + getrawmonotonic(&fence->submitted); + spin_lock(&queue->lock); + list_add_tail(&fence->head, &queue->head); + spin_unlock(&queue->lock); + + return 0; +} + +int vmw_fence_pull(struct vmw_fence_queue *queue, + uint32_t signaled_sequence) +{ + struct vmw_fence *fence, *next; + struct timespec now; + bool updated = false; + + spin_lock(&queue->lock); + getrawmonotonic(&now); + + if (list_empty(&queue->head)) { + queue->lag = ns_to_timespec(0); + queue->lag_time = now; + updated = true; + goto out_unlock; + } + + list_for_each_entry_safe(fence, next, &queue->head, head) { + if (signaled_sequence - fence->sequence > (1 << 30)) + continue; + + queue->lag = timespec_sub(now, fence->submitted); + queue->lag_time = now; + updated = true; + list_del(&fence->head); + kfree(fence); + } + +out_unlock: + spin_unlock(&queue->lock); + + return (updated) ? 0 : -EBUSY; +} + +static struct timespec vmw_timespec_add(struct timespec t1, + struct timespec t2) +{ + t1.tv_sec += t2.tv_sec; + t1.tv_nsec += t2.tv_nsec; + if (t1.tv_nsec >= 1000000000L) { + t1.tv_sec += 1; + t1.tv_nsec -= 1000000000L; + } + + return t1; +} + +static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue) +{ + struct timespec now; + + spin_lock(&queue->lock); + getrawmonotonic(&now); + queue->lag = vmw_timespec_add(queue->lag, + timespec_sub(now, queue->lag_time)); + queue->lag_time = now; + spin_unlock(&queue->lock); + return queue->lag; +} + + +static bool vmw_lag_lt(struct vmw_fence_queue *queue, + uint32_t us) +{ + struct timespec lag, cond; + + cond = ns_to_timespec((s64) us * 1000); + lag = vmw_fifo_lag(queue); + return (timespec_compare(&lag, &cond) < 1); +} + +int vmw_wait_lag(struct vmw_private *dev_priv, + struct vmw_fence_queue *queue, uint32_t us) +{ + struct vmw_fence *fence; + uint32_t sequence; + int ret; + + while (!vmw_lag_lt(queue, us)) { + spin_lock(&queue->lock); + if (list_empty(&queue->head)) + sequence = atomic_read(&dev_priv->fence_seq); + else { + fence = list_first_entry(&queue->head, + struct vmw_fence, head); + sequence = fence->sequence; + } + spin_unlock(&queue->lock); + + ret = vmw_wait_fence(dev_priv, false, sequence, true, + 3*HZ); + + if (unlikely(ret != 0)) + return ret; + + (void) vmw_fence_pull(queue, sequence); + } + return 0; +} + + diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 39d43a01d846..e6a1eb7ea954 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) __le32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t fifo_min, hwversion; + if (!(dev_priv->capabilities & 
SVGA_CAP_EXTENDED_FIFO)) + return false; + fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) return false; @@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) return true; } +bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + uint32_t caps; + + if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) + return false; + + caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); + if (caps & SVGA_FIFO_CAP_PITCHLOCK) + return true; + + return false; +} + int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; @@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); - + vmw_fence_queue_init(&fifo->fence_queue); return vmw_fifo_send_fence(dev_priv, &dummy); out_err: vfree(fifo->static_buffer); @@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) dev_priv->enable_state); mutex_unlock(&dev_priv->hw_mutex); + vmw_fence_queue_takedown(&fifo->fence_queue); if (likely(fifo->last_buffer != NULL)) { vfree(fifo->last_buffer); @@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) fifo_state->last_buffer_add = true; vmw_fifo_commit(dev_priv, bytes); fifo_state->last_buffer_add = false; + (void) vmw_fence_push(&fifo_state->fence_queue, *sequence); + vmw_update_sequence(dev_priv, fifo_state); out_err: return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 4d7cb5393860..e92298a6a383 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) return (busy == 0); } +void vmw_update_sequence(struct vmw_private *dev_priv, + struct vmw_fifo_state *fifo_state) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + + uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); + + if (dev_priv->last_read_sequence != sequence) { + dev_priv->last_read_sequence = sequence; + vmw_fence_pull(&fifo_state->fence_queue, sequence); + } +} bool vmw_fence_signaled(struct vmw_private *dev_priv, uint32_t sequence) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; struct vmw_fifo_state *fifo_state; bool ret; if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) return true; - dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); + fifo_state = &dev_priv->fifo; + vmw_update_sequence(dev_priv, fifo_state); if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) return true; - fifo_state = &dev_priv->fifo; if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && vmw_fifo_idle(dev_priv, sequence)) return true; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 31f9afed0a63..437ac786277a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -30,6 +30,8 @@ /* Might need a hrtimer here? */ #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? 
HZ / 60 : 1) +static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb); +static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb); void vmw_display_unit_cleanup(struct vmw_display_unit *du) { @@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, struct vmw_framebuffer_surface { struct vmw_framebuffer base; struct vmw_surface *surface; + struct vmw_dma_buffer *buffer; struct delayed_work d_work; struct mutex work_lock; bool present_fs; @@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, vfbs->base.base.depth = 24; vfbs->base.base.width = width; vfbs->base.base.height = height; - vfbs->base.pin = NULL; - vfbs->base.unpin = NULL; + vfbs->base.pin = &vmw_surface_dmabuf_pin; + vfbs->base.unpin = &vmw_surface_dmabuf_unpin; vfbs->surface = surface; mutex_init(&vfbs->work_lock); INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); @@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { .create_handle = vmw_framebuffer_create_handle, }; +static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) +{ + struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); + struct vmw_framebuffer_surface *vfbs = + vmw_framebuffer_to_vfbs(&vfb->base); + unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height; + int ret; + + vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL); + if (unlikely(vfbs->buffer == NULL)) + return -ENOMEM; + + vmw_overlay_pause_all(dev_priv); + ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size, + &vmw_vram_ne_placement, + false, &vmw_dmabuf_bo_free); + vmw_overlay_resume_all(dev_priv); + + return ret; +} + +static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) +{ + struct ttm_buffer_object *bo; + struct vmw_framebuffer_surface *vfbs = + vmw_framebuffer_to_vfbs(&vfb->base); + + bo = &vfbs->buffer->base; + ttm_bo_unref(&bo); + vfbs->buffer = NULL; + + return 0; +} + static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) { struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); @@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) vmw_framebuffer_to_vfbd(&vfb->base); int ret; + vmw_overlay_pause_all(dev_priv); ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); - if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { - vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); - vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); - vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); - - vmw_write(dev_priv, SVGA_REG_ENABLE, 1); - vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); - vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); - vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); - vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); - vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); - vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); - vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); - } else - WARN_ON(true); - vmw_overlay_resume_all(dev_priv); + WARN_ON(ret != 0); + return 0; } @@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, /* XXX get the first 3 from the surface info */ vfbd->base.base.bits_per_pixel = 32; - vfbd->base.base.pitch = width * 32 / 
4; + vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; vfbd->base.base.depth = 24; vfbd->base.base.width = width; vfbd->base.base.height = height; @@ -752,14 +771,8 @@ err_not_scanout: return NULL; } -static int vmw_kms_fb_changed(struct drm_device *dev) -{ - return 0; -} - static struct drm_mode_config_funcs vmw_kms_funcs = { .fb_create = vmw_kms_fb_create, - .fb_changed = vmw_kms_fb_changed, }; int vmw_kms_init(struct vmw_private *dev_priv) @@ -771,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) dev->mode_config.funcs = &vmw_kms_funcs; dev->mode_config.min_width = 1; dev->mode_config.min_height = 1; - dev->mode_config.max_width = dev_priv->fb_max_width; - dev->mode_config.max_height = dev_priv->fb_max_height; + /* assumed largest fb size */ + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; ret = vmw_kms_init_legacy_display_system(dev_priv); @@ -832,49 +846,141 @@ out: return ret; } +void vmw_kms_write_svga(struct vmw_private *vmw_priv, + unsigned width, unsigned height, unsigned pitch, + unsigned bbp, unsigned depth) +{ + if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) + vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); + else if (vmw_fifo_have_pitchlock(vmw_priv)) + iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); + vmw_write(vmw_priv, SVGA_REG_WIDTH, width); + vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); + vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp); + vmw_write(vmw_priv, SVGA_REG_DEPTH, depth); + vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); + vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); + vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); +} + int vmw_kms_save_vga(struct vmw_private *vmw_priv) { - /* - * setup a single multimon monitor with the size - * of 0x0, this stops the UI from resizing when we - * change the framebuffer size - */ - if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { - vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); - } + struct vmw_vga_topology_state *save; + uint32_t i; vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); - vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); + vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); - vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); + vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); + if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) + vmw_priv->vga_pitchlock = + vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); + else if (vmw_fifo_have_pitchlock(vmw_priv)) + vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + + SVGA_FIFO_PITCHLOCK); + + if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) + return 0; + vmw_priv->num_displays = vmw_read(vmw_priv, + SVGA_REG_NUM_GUEST_DISPLAYS); + + for (i = 0; i < vmw_priv->num_displays; ++i) { + save = &vmw_priv->vga_save[i]; + 
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); + save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY); + save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X); + save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y); + save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); + save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + } return 0; } int vmw_kms_restore_vga(struct vmw_private *vmw_priv) { + struct vmw_vga_topology_state *save; + uint32_t i; + vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); - vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); + vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); + if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) + vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, + vmw_priv->vga_pitchlock); + else if (vmw_fifo_have_pitchlock(vmw_priv)) + iowrite32(vmw_priv->vga_pitchlock, + vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); + + if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) + return 0; - /* TODO check for multimon */ - vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); + for (i = 0; i < vmw_priv->num_displays; ++i) { + save = &vmw_priv->vga_save[i]; + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + } return 0; } + +int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_vmw_update_layout_arg *arg = + (struct drm_vmw_update_layout_arg *)data; + struct vmw_master *vmaster = vmw_master(file_priv->master); + void __user *user_rects; + struct drm_vmw_rect *rects; + unsigned rects_size; + int ret; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + + if (!arg->num_outputs) { + struct drm_vmw_rect def_rect = {0, 0, 800, 600}; + vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect); + goto out_unlock; + } + + rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); + rects = kzalloc(rects_size, GFP_KERNEL); + if (unlikely(!rects)) { + ret = -ENOMEM; + goto out_unlock; + } + + user_rects = (void __user *)(unsigned long)arg->rects; + ret = copy_from_user(rects, user_rects, rects_size); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to get rects.\n"); + ret = -EFAULT; + goto out_free; + } + + vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects); + +out_free: + kfree(rects); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 8b95249f0531..8a398a0339b6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file 
*file_priv, int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); /* - * Legacy display unit functions - vmwgfx_ldu.h + * Legacy display unit functions - vmwgfx_ldu.c */ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); +int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, + struct drm_vmw_rect *rects); #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 90891593bf6c..cfaf690a5b2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -38,6 +38,7 @@ struct vmw_legacy_display { struct list_head active; unsigned num_active; + unsigned last_num_active; struct vmw_framebuffer *fb; }; @@ -48,9 +49,12 @@ struct vmw_legacy_display { struct vmw_legacy_display_unit { struct vmw_display_unit base; - struct list_head active; + unsigned pref_width; + unsigned pref_height; + bool pref_active; + struct drm_display_mode *pref_mode; - unsigned unit; + struct list_head active; }; static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) @@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) { struct vmw_legacy_display *lds = dev_priv->ldu_priv; struct vmw_legacy_display_unit *entry; - struct drm_crtc *crtc; + struct drm_framebuffer *fb = NULL; + struct drm_crtc *crtc = NULL; int i = 0; - /* to stop the screen from changing size on resize */ - vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); - for (i = 0; i < lds->num_active; i++) { - vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); - vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); - vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); - vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + /* If there is no display topology the host just assumes + * that the guest will set the same layout as the host. + */ + if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) { + int w = 0, h = 0; + list_for_each_entry(entry, &lds->active, active) { + crtc = &entry->base.crtc; + w = max(w, crtc->x + crtc->mode.hdisplay); + h = max(h, crtc->y + crtc->mode.vdisplay); + i++; + } + + if (crtc == NULL) + return 0; + fb = entry->base.crtc.fb; + + vmw_kms_write_svga(dev_priv, w, h, fb->pitch, + fb->bits_per_pixel, fb->depth); + + return 0; } - /* Now set the mode */ - vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); + if (!list_empty(&lds->active)) { + entry = list_entry(lds->active.next, typeof(*entry), active); + fb = entry->base.crtc.fb; + + vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch, + fb->bits_per_pixel, fb->depth); + } + + /* Make sure we always show something. */ + vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, + lds->num_active ? lds->num_active : 1); + i = 0; list_for_each_entry(entry, &lds->active, active) { crtc = &entry->base.crtc; @@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) i++; } + BUG_ON(i != lds->num_active); + + lds->last_num_active = lds->num_active; + return 0; } @@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv, if (list_empty(&ldu->active)) return 0; + /* Must init otherwise list_empty(&ldu->active) will not work. 
*/ list_del_init(&ldu->active); if (--(ld->num_active) == 0) { BUG_ON(!ld->fb); @@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv, struct vmw_legacy_display_unit *entry; struct list_head *at; + BUG_ON(!ld->num_active && ld->fb); + if (vfb != ld->fb) { + if (ld->fb && ld->fb->unpin) + ld->fb->unpin(ld->fb); + if (vfb->pin) + vfb->pin(vfb); + ld->fb = vfb; + } + if (!list_empty(&ldu->active)) return 0; at = &ld->active; list_for_each_entry(entry, &ld->active, active) { - if (entry->unit > ldu->unit) + if (entry->base.unit > ldu->base.unit) break; at = &entry->active; } list_add(&ldu->active, at); - if (ld->num_active++ == 0) { - BUG_ON(ld->fb); - if (vfb->pin) - vfb->pin(vfb); - ld->fb = vfb; - } + + ld->num_active++; return 0; } @@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) /* ldu only supports one fb active at the time */ if (dev_priv->ldu_priv->fb && vfb && + !(dev_priv->ldu_priv->num_active == 1 && + !list_empty(&ldu->active)) && dev_priv->ldu_priv->fb != vfb) { DRM_ERROR("Multiple framebuffers not supported\n"); return -EINVAL; @@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) static enum drm_connector_status vmw_ldu_connector_detect(struct drm_connector *connector) { - /* XXX vmwctrl should control connection status */ - if (vmw_connector_to_ldu(connector)->base.unit == 0) + if (vmw_connector_to_ldu(connector)->pref_active) return connector_status_connected; return connector_status_disconnected; } @@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { 752, 800, 0, 480, 489, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 800x600@60Hz */ - { DRM_MODE("800x600", - DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, - 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, - 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, + 968, 1056, 0, 600, 601, 605, 628, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, @@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, uint32_t max_width, uint32_t max_height) { + struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); struct drm_device *dev = connector->dev; struct drm_display_mode *mode = NULL; + struct drm_display_mode prefmode = { DRM_MODE("preferred", + DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) + }; int i; + /* Add preferred mode */ + { + mode = drm_mode_duplicate(dev, &prefmode); + if (!mode) + return 0; + mode->hdisplay = ldu->pref_width; + mode->vdisplay = ldu->pref_height; + mode->vrefresh = drm_mode_vrefresh(mode); + drm_mode_probed_add(connector, mode); + + if (ldu->pref_mode) { + list_del_init(&ldu->pref_mode->head); + drm_mode_destroy(dev, ldu->pref_mode); + } + + ldu->pref_mode = mode; + } + for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { if (vmw_ldu_connector_builtin[i].hdisplay > max_width || vmw_ldu_connector_builtin[i].vdisplay > max_height) @@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) if (!ldu) return -ENOMEM; - ldu->unit = unit; + ldu->base.unit = unit; crtc = &ldu->base.crtc; encoder = &ldu->base.encoder; connector = &ldu->base.connector; + 
INIT_LIST_HEAD(&ldu->active); + + ldu->pref_active = (unit == 0); + ldu->pref_width = 800; + ldu->pref_height = 600; + ldu->pref_mode = NULL; + drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - /* Initial status */ - if (unit == 0) - connector->status = connector_status_connected; - else - connector->status = connector_status_disconnected; + connector->status = vmw_ldu_connector_detect(connector); drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, DRM_MODE_ENCODER_LVDS); @@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; - INIT_LIST_HEAD(&ldu->active); - drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); drm_connector_attach_property(connector, @@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) INIT_LIST_HEAD(&dev_priv->ldu_priv->active); dev_priv->ldu_priv->num_active = 0; + dev_priv->ldu_priv->last_num_active = 0; dev_priv->ldu_priv->fb = NULL; drm_mode_create_dirty_info_property(dev_priv->dev); vmw_ldu_init(dev_priv, 0); - vmw_ldu_init(dev_priv, 1); - vmw_ldu_init(dev_priv, 2); - vmw_ldu_init(dev_priv, 3); - vmw_ldu_init(dev_priv, 4); - vmw_ldu_init(dev_priv, 5); - vmw_ldu_init(dev_priv, 6); - vmw_ldu_init(dev_priv, 7); + /* for old hardware without multimon only enable one display */ + if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { + vmw_ldu_init(dev_priv, 1); + vmw_ldu_init(dev_priv, 2); + vmw_ldu_init(dev_priv, 3); + vmw_ldu_init(dev_priv, 4); + vmw_ldu_init(dev_priv, 5); + vmw_ldu_init(dev_priv, 6); + vmw_ldu_init(dev_priv, 7); + } return 0; } @@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) return 0; } + +int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, + struct drm_vmw_rect *rects) +{ + struct drm_device *dev = dev_priv->dev; + struct vmw_legacy_display_unit *ldu; + struct drm_connector *con; + int i; + + mutex_lock(&dev->mode_config.mutex); + +#if 0 + DRM_INFO("%s: new layout ", __func__); + for (i = 0; i < (int)num; i++) + DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, + rects[i].w, rects[i].h); + DRM_INFO("\n"); +#else + (void)i; +#endif + + list_for_each_entry(con, &dev->mode_config.connector_list, head) { + ldu = vmw_connector_to_ldu(con); + if (num > ldu->base.unit) { + ldu->pref_width = rects[ldu->base.unit].w; + ldu->pref_height = rects[ldu->base.unit].h; + ldu->pref_active = true; + } else { + ldu->pref_width = 800; + ldu->pref_height = 600; + ldu->pref_active = false; + } + con->status = vmw_ldu_connector_detect(con); + } + + mutex_unlock(&dev->mode_config.mutex); + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 5b6eabeb7f51..df2036ed18d5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, if (pin) overlay_placement = &vmw_vram_ne_placement; - ret = ttm_bo_validate(bo, overlay_placement, interruptible, false); + ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false); ttm_bo_unreserve(bo); @@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, if (stream->buf != buf) stream->buf = vmw_dmabuf_reference(buf); stream->saved = *arg; + /* stream is no longer stopped/paused */ + stream->paused = false; return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index f8fbbc67a406..5f2d5df01e5c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ret = copy_from_user(srf->sizes, user_sizes, srf->num_sizes * sizeof(*srf->sizes)); - if (unlikely(ret != 0)) + if (unlikely(ret != 0)) { + ret = -EFAULT; goto out_err1; + } if (srf->scanout && srf->num_sizes == 1 && @@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, if (user_sizes) ret = copy_to_user(user_sizes, srf->sizes, srf->num_sizes * sizeof(*srf->sizes)); - if (unlikely(ret != 0)) + if (unlikely(ret != 0)) { DRM_ERROR("copy_to_user failed %p %u\n", user_sizes, srf->num_sizes); + ret = -EFAULT; + } out_bad_resource: out_no_reference: ttm_base_object_unref(&base); @@ -1013,7 +1017,7 @@ int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id) } /* - * Stream managment + * Stream management */ static void vmw_stream_destroy(struct vmw_resource *res) diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index 61ab4daf0bbb..8d0e31a22027 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig @@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS multiple GPUS. The overhead for each GPU is very small. config VGA_SWITCHEROO - bool "Laptop Hybrid Grapics - GPU switching support" + bool "Laptop Hybrid Graphics - GPU switching support" depends on X86 depends on ACPI help - Many laptops released in 2008/9/10 have two gpus with a multiplxer + Many laptops released in 2008/9/10 have two GPUs with a multiplexer to switch between them. This adds support for dynamic switching when X isn't running and delayed switching until the next logoff. This - features is called hybrid graphics, ATI PowerXpress, and Nvidia + feature is called hybrid graphics, ATI PowerXpress, and Nvidia HybridPower. diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index d6d1149d525d..c8768f38511e 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -276,8 +276,10 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, mutex_lock(&vgasr_mutex); - if (!vgasr_priv.active) - return -EINVAL; + if (!vgasr_priv.active) { + cnt = -EINVAL; + goto out; + } /* pwr off the device not in use */ if (strncmp(usercmd, "OFF", 3) == 0) { diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 8827814d0735..b87569e96b16 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -1,12 +1,32 @@ /* - * vgaarb.c + * vgaarb.c: Implements the VGA arbitration. For details refer to + * Documentation/vgaarbiter.txt + * * * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> * - * Implements the VGA arbitration. 
For details refer to - * Documentation/vgaarbiter.txt + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS + * IN THE SOFTWARE. + * */ #include <linux/module.h> @@ -20,6 +40,7 @@ #include <linux/spinlock.h> #include <linux/poll.h> #include <linux/miscdevice.h> +#include <linux/slab.h> #include <linux/uaccess.h> @@ -154,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) rsrc |= VGA_RSRC_LEGACY_MEM; - pr_devel("%s: %d\n", __func__, rsrc); - pr_devel("%s: owns: %d\n", __func__, vgadev->owns); + pr_debug("%s: %d\n", __func__, rsrc); + pr_debug("%s: owns: %d\n", __func__, vgadev->owns); /* Check what resources we need to acquire */ wants = rsrc & ~vgadev->owns; @@ -267,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) { unsigned int old_locks = vgadev->locks; - pr_devel("%s\n", __func__); + pr_debug("%s\n", __func__); /* Update our counters, and account for equivalent legacy resources * if we decode them @@ -574,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, else vga_decode_count--; } + pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); } void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) @@ -830,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, curr_pos += 5; remaining -= 5; - pr_devel("client 0x%p called 'lock'\n", priv); + pr_debug("client 0x%p called 'lock'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; @@ -866,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, curr_pos += 7; remaining -= 7; - pr_devel("client 0x%p called 'unlock'\n", priv); + pr_debug("client 0x%p called 'unlock'\n", priv); if (strncmp(curr_pos, "all", 3) == 0) io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; @@ -916,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, curr_pos += 8; remaining -= 8; - pr_devel("client 0x%p called 'trylock'\n", priv); + pr_debug("client 0x%p called 'trylock'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; @@ -960,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, curr_pos += 7; remaining -= 7; - pr_devel("client 0x%p called 'target'\n", priv); + pr_debug("client 0x%p called 'target'\n", priv); /* if target is default */ if (!strncmp(curr_pos, 
"default", 7)) pdev = pci_dev_get(vga_default_device()); @@ -970,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, ret_val = -EPROTO; goto done; } - pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, + pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); pbus = pci_find_bus(domain, bus); - pr_devel("vgaarb: pbus %p\n", pbus); + pr_debug("vgaarb: pbus %p\n", pbus); if (pbus == NULL) { pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", domain, bus); @@ -982,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, goto done; } pdev = pci_get_slot(pbus, devfn); - pr_devel("vgaarb: pdev %p\n", pdev); + pr_debug("vgaarb: pdev %p\n", pdev); if (!pdev) { pr_err("vgaarb: invalid PCI address %x:%x\n", bus, devfn); @@ -992,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, } vgadev = vgadev_find(pdev); - pr_devel("vgaarb: vgadev %p\n", vgadev); + pr_debug("vgaarb: vgadev %p\n", vgadev); if (vgadev == NULL) { pr_err("vgaarb: this pci device is not a vga device\n"); pci_dev_put(pdev); @@ -1028,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, } else if (strncmp(curr_pos, "decodes ", 8) == 0) { curr_pos += 8; remaining -= 8; - pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); + pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; @@ -1057,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) { struct vga_arb_private *priv = file->private_data; - pr_devel("%s\n", __func__); + pr_debug("%s\n", __func__); if (priv == NULL) return -ENODEV; @@ -1070,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file) struct vga_arb_private *priv; unsigned long flags; - pr_devel("%s\n", __func__); + pr_debug("%s\n", __func__); priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); if (priv == NULL) @@ -1100,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) unsigned long flags; int i; - pr_devel("%s\n", __func__); + pr_debug("%s\n", __func__); if (priv == NULL) return -ENODEV; @@ -1111,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) uc = &priv->cards[i]; if (uc->pdev == NULL) continue; - pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", + pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n", uc->io_cnt, uc->mem_cnt); while (uc->io_cnt--) vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); @@ -1164,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action, struct pci_dev *pdev = to_pci_dev(dev); bool notify = false; - pr_devel("%s\n", __func__); + pr_debug("%s\n", __func__); /* For now we're only intereted in devices added and removed. I didn't * test this thing here, so someone needs to double check for the |