author     Chris Wilson <chris@chris-wilson.co.uk>   2010-12-16 21:02:15 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>   2010-12-16 21:02:15 +0000
commit     d8c58fabd75021cdd99abcd96513cb088d41092b (patch)
tree       f6554ecfb27c0d50f5ae6acae3a7077282813cab
parent     9c04f015ebc2cc2cca5a4a576deb82a311578edc (diff)
parent     b08ebe7e776e5be0271ed1e1bbb384e1f29dd117 (diff)
Merge remote branch 'airlied/drm-core-next' into drm-intel-next
114 files changed, 10988 insertions, 5145 deletions
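The drm_irq.c changes below derive per-crtc timing constants (frame, line and pixel durations) from the true hardware mode in drm_calc_timestamping_constants(), then use the reported scanout position to reconstruct the end-of-vblank time in drm_calc_vbltimestamp_from_scanoutpos(). The following standalone sketch illustrates that arithmetic only; it is not the patch code, and the mode numbers (a 1080p60-style mode) and variable names are illustrative assumptions.

/*
 * Sketch of the patch's timestamp arithmetic:
 *   pixeldur = 1e9 / dotclock
 *   linedur  = htotal * 1e9 / dotclock
 *   framedur = vtotal * linedur
 *   vblank_time = query_time - (vpos * linedur + hpos * pixeldur)
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dotclock = 148500ULL * 1000;   /* hwmode.clock is in kHz */
	int crtc_htotal = 2200, crtc_vtotal = 1125;

	int64_t pixeldur_ns = 1000000000ULL / dotclock;
	int64_t linedur_ns  = (uint64_t)crtc_htotal * 1000000000ULL / dotclock;
	int64_t framedur_ns = (int64_t)crtc_vtotal * linedur_ns;

	/* Example scanout position from hardware: line 100, pixel 50. */
	int vpos = 100, hpos = 50;

	/* Time elapsed since start of scanout of the first visible line;
	 * subtracting it from the query time yields the end-of-vblank
	 * timestamp, as done in the patch (delta_ns can be negative while
	 * still inside the vblank interval). */
	int64_t delta_ns = (int64_t)vpos * linedur_ns +
			   (int64_t)hpos * pixeldur_ns;

	printf("frame %lld ns, line %lld ns, delta %lld ns\n",
	       (long long)framedur_ns, (long long)linedur_ns,
	       (long long)delta_ns);
	return 0;
}

For the example mode this gives a frame duration of roughly 16.67 ms, which is why the patch can treat a timestamp difference of under 1 ms as a redundant vblank interrupt.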
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index bede10a03407..c6b2e27a446a 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; - struct drm_display_mode *adjusted_mode, saved_mode; + struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_encoder_helper_funcs *encoder_funcs; int saved_x, saved_y; @@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (!crtc->enabled) return true; + saved_hwmode = crtc->hwmode; saved_mode = crtc->mode; saved_x = crtc->x; saved_y = crtc->y; @@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, } + /* Store real post-adjustment hardware mode. */ + crtc->hwmode = *adjusted_mode; + + /* Calculate and store various constants which + * are later needed by vblank and swap-completion + * timestamping. They are derived from true hwmode. + */ + drm_calc_timestamping_constants(crtc); + /* XXX free adjustedmode */ drm_mode_destroy(dev, adjusted_mode); /* FIXME: add subpixel order */ done: if (!ret) { + crtc->hwmode = saved_hwmode; crtc->mode = saved_mode; crtc->x = saved_x; crtc->y = saved_y; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 722700d5d73e..77c875db5aae 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -40,6 +40,22 @@ #include <linux/slab.h> #include <linux/vgaarb.h> + +/* Access macro for slots in vblank timestamp ringbuffer. */ +#define vblanktimestamp(dev, crtc, count) ( \ + (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ + ((count) % DRM_VBLANKTIME_RBSIZE)]) + +/* Retry timestamp calculation up to 3 times to satisfy + * drm_timestamp_precision before giving up. + */ +#define DRM_TIMESTAMP_MAXRETRIES 3 + +/* Threshold in nanoseconds for detection of redundant + * vblank irq in drm_handle_vblank(). 1 msec should be ok. + */ +#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 + /** * Get interrupt from bus id. * @@ -77,6 +93,87 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, return 0; } +/* + * Clear vblank timestamp buffer for a crtc. + */ +static void clear_vblank_timestamps(struct drm_device *dev, int crtc) +{ + memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, + DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval)); +} + +/* + * Disable vblank irq's on crtc, make sure that last vblank count + * of hardware and corresponding consistent software vblank counter + * are preserved, even if there are any spurious vblank irq's after + * disable. + */ +static void vblank_disable_and_save(struct drm_device *dev, int crtc) +{ + unsigned long irqflags; + u32 vblcount; + s64 diff_ns; + int vblrc; + struct timeval tvblank; + + /* Prevent vblank irq processing while disabling vblank irqs, + * so no updates of timestamps or count can happen after we've + * disabled. Needed to prevent races in case of delayed irq's. + * Disable preemption, so vblank_time_lock is held as short as + * possible, even under a kernel with PREEMPT_RT patches. + */ + preempt_disable(); + spin_lock_irqsave(&dev->vblank_time_lock, irqflags); + + dev->driver->disable_vblank(dev, crtc); + dev->vblank_enabled[crtc] = 0; + + /* No further vblank irq's will be processed after + * this point. Get current hardware vblank count and + * vblank timestamp, repeat until they are consistent. 
+ * + * FIXME: There is still a race condition here and in + * drm_update_vblank_count() which can cause off-by-one + * reinitialization of software vblank counter. If gpu + * vblank counter doesn't increment exactly at the leading + * edge of a vblank interval, then we can lose 1 count if + * we happen to execute between start of vblank and the + * delayed gpu counter increment. + */ + do { + dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); + vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); + } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); + + /* Compute time difference to stored timestamp of last vblank + * as updated by last invocation of drm_handle_vblank() in vblank irq. + */ + vblcount = atomic_read(&dev->_vblank_count[crtc]); + diff_ns = timeval_to_ns(&tvblank) - + timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + + /* If there is at least 1 msec difference between the last stored + * timestamp and tvblank, then we are currently executing our + * disable inside a new vblank interval, the tvblank timestamp + * corresponds to this new vblank interval and the irq handler + * for this vblank didn't run yet and won't run due to our disable. + * Therefore we need to do the job of drm_handle_vblank() and + * increment the vblank counter by one to account for this vblank. + * + * Skip this step if there isn't any high precision timestamp + * available. In that case we can't account for this and just + * hope for the best. + */ + if ((vblrc > 0) && (abs(diff_ns) > 1000000)) + atomic_inc(&dev->_vblank_count[crtc]); + + /* Invalidate all timestamps while vblank irq's are off. */ + clear_vblank_timestamps(dev, crtc); + + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); + preempt_enable(); +} + static void vblank_disable_fn(unsigned long arg) { struct drm_device *dev = (struct drm_device *)arg; @@ -91,10 +188,7 @@ static void vblank_disable_fn(unsigned long arg) if (atomic_read(&dev->vblank_refcount[i]) == 0 && dev->vblank_enabled[i]) { DRM_DEBUG("disabling vblank on crtc %d\n", i); - dev->last_vblank[i] = - dev->driver->get_vblank_counter(dev, i); - dev->driver->disable_vblank(dev, i); - dev->vblank_enabled[i] = 0; + vblank_disable_and_save(dev, i); } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } @@ -117,6 +211,7 @@ void drm_vblank_cleanup(struct drm_device *dev) kfree(dev->last_vblank); kfree(dev->last_vblank_wait); kfree(dev->vblank_inmodeset); + kfree(dev->_vblank_time); dev->num_crtcs = 0; } @@ -129,6 +224,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, (unsigned long)dev); spin_lock_init(&dev->vbl_lock); + spin_lock_init(&dev->vblank_time_lock); + dev->num_crtcs = num_crtcs; dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, @@ -161,6 +258,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) if (!dev->vblank_inmodeset) goto err; + dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE, + sizeof(struct timeval), GFP_KERNEL); + if (!dev->_vblank_time) + goto err; + + DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); + + /* Driver specific high-precision vblank timestamping supported? 
*/ + if (dev->driver->get_vblank_timestamp) + DRM_INFO("Driver supports precise vblank timestamp query.\n"); + else + DRM_INFO("No driver support for vblank timestamp query.\n"); + /* Zero per-crtc vblank stuff */ for (i = 0; i < num_crtcs; i++) { init_waitqueue_head(&dev->vbl_queue[i]); @@ -279,7 +389,7 @@ EXPORT_SYMBOL(drm_irq_install); * * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. */ -int drm_irq_uninstall(struct drm_device * dev) +int drm_irq_uninstall(struct drm_device *dev) { unsigned long irqflags; int irq_enabled, i; @@ -335,7 +445,9 @@ int drm_control(struct drm_device *dev, void *data, { struct drm_control *ctl = data; - /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ + /* if we haven't irq we fallback for compatibility reasons - + * this used to be a separate function in drm_dma.h + */ switch (ctl->func) { @@ -360,6 +472,287 @@ int drm_control(struct drm_device *dev, void *data, } /** + * drm_calc_timestamping_constants - Calculate and + * store various constants which are later needed by + * vblank and swap-completion timestamping, e.g, by + * drm_calc_vbltimestamp_from_scanoutpos(). + * They are derived from crtc's true scanout timing, + * so they take things like panel scaling or other + * adjustments into account. + * + * @crtc drm_crtc whose timestamp constants should be updated. + * + */ +void drm_calc_timestamping_constants(struct drm_crtc *crtc) +{ + s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; + u64 dotclock; + + /* Dot clock in Hz: */ + dotclock = (u64) crtc->hwmode.clock * 1000; + + /* Valid dotclock? */ + if (dotclock > 0) { + /* Convert scanline length in pixels and video dot clock to + * line duration, frame duration and pixel duration in + * nanoseconds: + */ + pixeldur_ns = (s64) div64_u64(1000000000, dotclock); + linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal * + 1000000000), dotclock); + framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns; + } else + DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", + crtc->base.id); + + crtc->pixeldur_ns = pixeldur_ns; + crtc->linedur_ns = linedur_ns; + crtc->framedur_ns = framedur_ns; + + DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", + crtc->base.id, crtc->hwmode.crtc_htotal, + crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay); + DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", + crtc->base.id, (int) dotclock/1000, (int) framedur_ns, + (int) linedur_ns, (int) pixeldur_ns); +} +EXPORT_SYMBOL(drm_calc_timestamping_constants); + +/** + * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms + * drivers. Implements calculation of exact vblank timestamps from + * given drm_display_mode timings and current video scanout position + * of a crtc. This can be called from within get_vblank_timestamp() + * implementation of a kms driver to implement the actual timestamping. + * + * Should return timestamps conforming to the OML_sync_control OpenML + * extension specification. The timestamp corresponds to the end of + * the vblank interval, aka start of scanout of topmost-leftmost display + * pixel in the following video frame. + * + * Requires support for optional dev->driver->get_scanout_position() + * in kms driver, plus a bit of setup code to provide a drm_display_mode + * that corresponds to the true scanout timing. + * + * The current implementation only handles standard video modes. 
It + * returns as no operation if a doublescan or interlaced video mode is + * active. Higher level code is expected to handle this. + * + * @dev: DRM device. + * @crtc: Which crtc's vblank timestamp to retrieve. + * @max_error: Desired maximum allowable error in timestamps (nanosecs). + * On return contains true maximum error of timestamp. + * @vblank_time: Pointer to struct timeval which should receive the timestamp. + * @flags: Flags to pass to driver: + * 0 = Default. + * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. + * @refcrtc: drm_crtc* of crtc which defines scanout timing. + * + * Returns negative value on error, failure or if not supported in current + * video mode: + * + * -EINVAL - Invalid crtc. + * -EAGAIN - Temporary unavailable, e.g., called before initial modeset. + * -ENOTSUPP - Function not supported in current display mode. + * -EIO - Failed, e.g., due to failed scanout position query. + * + * Returns or'ed positive status flags on success: + * + * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping. + * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. + * + */ +int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags, + struct drm_crtc *refcrtc) +{ + struct timeval stime, raw_time; + struct drm_display_mode *mode; + int vbl_status, vtotal, vdisplay; + int vpos, hpos, i; + s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; + bool invbl; + + if (crtc < 0 || crtc >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + /* Scanout position query not supported? Should not happen. */ + if (!dev->driver->get_scanout_position) { + DRM_ERROR("Called from driver w/o get_scanout_position()!?\n"); + return -EIO; + } + + mode = &refcrtc->hwmode; + vtotal = mode->crtc_vtotal; + vdisplay = mode->crtc_vdisplay; + + /* Durations of frames, lines, pixels in nanoseconds. */ + framedur_ns = refcrtc->framedur_ns; + linedur_ns = refcrtc->linedur_ns; + pixeldur_ns = refcrtc->pixeldur_ns; + + /* If mode timing undefined, just return as no-op: + * Happens during initial modesetting of a crtc. + */ + if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) { + DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); + return -EAGAIN; + } + + /* Don't know yet how to handle interlaced or + * double scan modes. Just no-op for now. + */ + if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { + DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); + return -ENOTSUPP; + } + + /* Get current scanout position with system timestamp. + * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times + * if single query takes longer than max_error nanoseconds. + * + * This guarantees a tight bound on maximum error if + * code gets preempted or delayed for some reason. + */ + for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { + /* Disable preemption to make it very likely to + * succeed in the first iteration even on PREEMPT_RT kernel. + */ + preempt_disable(); + + /* Get system timestamp before query. */ + do_gettimeofday(&stime); + + /* Get vertical and horizontal scanout pos. vpos, hpos. */ + vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); + + /* Get system timestamp after query. */ + do_gettimeofday(&raw_time); + + preempt_enable(); + + /* Return as no-op if scanout query unsupported or failed. 
*/ + if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { + DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", + crtc, vbl_status); + return -EIO; + } + + duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); + + /* Accept result with < max_error nsecs timing uncertainty. */ + if (duration_ns <= (s64) *max_error) + break; + } + + /* Noisy system timing? */ + if (i == DRM_TIMESTAMP_MAXRETRIES) { + DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", + crtc, (int) duration_ns/1000, *max_error/1000, i); + } + + /* Return upper bound of timestamp precision error. */ + *max_error = (int) duration_ns; + + /* Check if in vblank area: + * vpos is >=0 in video scanout area, but negative + * within vblank area, counting down the number of lines until + * start of scanout. + */ + invbl = vbl_status & DRM_SCANOUTPOS_INVBL; + + /* Convert scanout position into elapsed time at raw_time query + * since start of scanout at first display scanline. delta_ns + * can be negative if start of scanout hasn't happened yet. + */ + delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns; + + /* Is vpos outside nominal vblank area, but less than + * 1/100 of a frame height away from start of vblank? + * If so, assume this isn't a massively delayed vblank + * interrupt, but a vblank interrupt that fired a few + * microseconds before true start of vblank. Compensate + * by adding a full frame duration to the final timestamp. + * Happens, e.g., on ATI R500, R600. + * + * We only do this if DRM_CALLED_FROM_VBLIRQ. + */ + if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl && + ((vdisplay - vpos) < vtotal / 100)) { + delta_ns = delta_ns - framedur_ns; + + /* Signal this correction as "applied". */ + vbl_status |= 0x8; + } + + /* Subtract time delta from raw timestamp to get final + * vblank_time timestamp for end of vblank. + */ + *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); + + DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n", + crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec, + raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec, + (int) duration_ns/1000, i); + + vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; + if (invbl) + vbl_status |= DRM_VBLANKTIME_INVBL; + + return vbl_status; +} +EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); + +/** + * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent + * vblank interval. + * + * @dev: DRM device + * @crtc: which crtc's vblank timestamp to retrieve + * @tvblank: Pointer to target struct timeval which should receive the timestamp + * @flags: Flags to pass to driver: + * 0 = Default. + * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. + * + * Fetches the system timestamp corresponding to the time of the most recent + * vblank interval on specified crtc. May call into kms-driver to + * compute the timestamp with a high-precision GPU specific method. + * + * Returns zero if timestamp originates from uncorrected do_gettimeofday() + * call, i.e., it isn't very precisely locked to the true vblank. + * + * Returns non-zero if timestamp is considered to be very precise. + */ +u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, + struct timeval *tvblank, unsigned flags) +{ + int ret = 0; + + /* Define requested maximum error on timestamps (nanoseconds). */ + int max_error = (int) drm_timestamp_precision * 1000; + + /* Query driver if possible and precision timestamping enabled. 
*/ + if (dev->driver->get_vblank_timestamp && (max_error > 0)) { + ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, + tvblank, flags); + if (ret > 0) + return (u32) ret; + } + + /* GPU high precision timestamp query unsupported or failed. + * Return gettimeofday timestamp as best estimate. + */ + do_gettimeofday(tvblank); + + return 0; +} +EXPORT_SYMBOL(drm_get_last_vbltimestamp); + +/** * drm_vblank_count - retrieve "cooked" vblank counter value * @dev: DRM device * @crtc: which counter to retrieve @@ -375,6 +768,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc) EXPORT_SYMBOL(drm_vblank_count); /** + * drm_vblank_count_and_time - retrieve "cooked" vblank counter value + * and the system timestamp corresponding to that vblank counter value. + * + * @dev: DRM device + * @crtc: which counter to retrieve + * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. + * + * Fetches the "cooked" vblank count value that represents the number of + * vblank events since the system was booted, including lost events due to + * modesetting activity. Returns corresponding system timestamp of the time + * of the vblank interval that corresponds to the current value vblank counter + * value. + */ +u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, + struct timeval *vblanktime) +{ + u32 cur_vblank; + + /* Read timestamp from slot of _vblank_time ringbuffer + * that corresponds to current vblank count. Retry if + * count has incremented during readout. This works like + * a seqlock. + */ + do { + cur_vblank = atomic_read(&dev->_vblank_count[crtc]); + *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); + smp_rmb(); + } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); + + return cur_vblank; +} +EXPORT_SYMBOL(drm_vblank_count_and_time); + +/** * drm_update_vblank_count - update the master vblank counter * @dev: DRM device * @crtc: counter to update @@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count); */ static void drm_update_vblank_count(struct drm_device *dev, int crtc) { - u32 cur_vblank, diff; + u32 cur_vblank, diff, tslot, rc; + struct timeval t_vblank; /* * Interrupts were disabled prior to this call, so deal with counter @@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) * NOTE! It's possible we lost a full dev->max_vblank_count events * here if the register is small or we had vblank interrupts off for * a long time. + * + * We repeat the hardware vblank counter & timestamp query until + * we get consistent results. This to prevent races between gpu + * updating its hardware counter while we are retrieving the + * corresponding vblank timestamp. */ - cur_vblank = dev->driver->get_vblank_counter(dev, crtc); + do { + cur_vblank = dev->driver->get_vblank_counter(dev, crtc); + rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); + } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); + + /* Deal with counter wrap */ diff = cur_vblank - dev->last_vblank[crtc]; if (cur_vblank < dev->last_vblank[crtc]) { diff += dev->max_vblank_count; @@ -413,6 +851,16 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", crtc, diff); + /* Reinitialize corresponding vblank timestamp if high-precision query + * available. Skip this step if query unsupported or failed. Will + * reinitialize delayed at next vblank interrupt in that case. 
+ */ + if (rc) { + tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; + vblanktimestamp(dev, crtc, tslot) = t_vblank; + smp_wmb(); + } + atomic_add(diff, &dev->_vblank_count[crtc]); } @@ -429,15 +877,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) */ int drm_vblank_get(struct drm_device *dev, int crtc) { - unsigned long irqflags; + unsigned long irqflags, irqflags2; int ret = 0; spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { + /* Disable preemption while holding vblank_time_lock. Do + * it explicitely to guard against PREEMPT_RT kernel. + */ + preempt_disable(); + spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); if (!dev->vblank_enabled[crtc]) { + /* Enable vblank irqs under vblank_time_lock protection. + * All vblank count & timestamp updates are held off + * until we are done reinitializing master counter and + * timestamps. Filtercode in drm_handle_vblank() will + * prevent double-accounting of same vblank interval. + */ ret = dev->driver->enable_vblank(dev, crtc); - DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); + DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", + crtc, ret); if (ret) atomic_dec(&dev->vblank_refcount[crtc]); else { @@ -445,6 +905,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc) drm_update_vblank_count(dev, crtc); } } + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); + preempt_enable(); } else { if (!dev->vblank_enabled[crtc]) { atomic_dec(&dev->vblank_refcount[crtc]); @@ -463,15 +925,17 @@ EXPORT_SYMBOL(drm_vblank_get); * @crtc: which counter to give up * * Release ownership of a given vblank counter, turning off interrupts - * if possible. + * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 
*/ void drm_vblank_put(struct drm_device *dev, int crtc) { - BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0); + BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0); /* Last user schedules interrupt disable */ - if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) - mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); + if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && + (drm_vblank_offdelay > 0)) + mod_timer(&dev->vblank_disable_timer, + jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); } EXPORT_SYMBOL(drm_vblank_put); @@ -480,10 +944,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc) unsigned long irqflags; spin_lock_irqsave(&dev->vbl_lock, irqflags); - dev->driver->disable_vblank(dev, crtc); + vblank_disable_and_save(dev, crtc); DRM_WAKEUP(&dev->vbl_queue[crtc]); - dev->vblank_enabled[crtc] = 0; - dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } EXPORT_SYMBOL(drm_vblank_off); @@ -602,7 +1064,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, e->base.file_priv = file_priv; e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; - do_gettimeofday(&now); spin_lock_irqsave(&dev->event_lock, flags); if (file_priv->event_space < sizeof e->event) { @@ -611,7 +1072,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, } file_priv->event_space -= sizeof e->event; - seq = drm_vblank_count(dev, pipe); + seq = drm_vblank_count_and_time(dev, pipe, &now); + if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1 << 23)) { vblwait->request.sequence = seq + 1; @@ -727,11 +1189,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data, if (ret != -EINTR) { struct timeval now; - do_gettimeofday(&now); - + vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now); vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; - vblwait->reply.sequence = drm_vblank_count(dev, crtc); + DRM_DEBUG("returning %d to client\n", vblwait->reply.sequence); } else { @@ -750,8 +1211,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) unsigned long flags; unsigned int seq; - do_gettimeofday(&now); - seq = drm_vblank_count(dev, crtc); + seq = drm_vblank_count_and_time(dev, crtc, &now); spin_lock_irqsave(&dev->event_lock, flags); @@ -789,11 +1249,64 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) */ void drm_handle_vblank(struct drm_device *dev, int crtc) { + u32 vblcount; + s64 diff_ns; + struct timeval tvblank; + unsigned long irqflags; + if (!dev->num_crtcs) return; - atomic_inc(&dev->_vblank_count[crtc]); + /* Need timestamp lock to prevent concurrent execution with + * vblank enable/disable, as this would cause inconsistent + * or corrupted timestamps and vblank counts. + */ + spin_lock_irqsave(&dev->vblank_time_lock, irqflags); + + /* Vblank irq handling disabled. Nothing to do. */ + if (!dev->vblank_enabled[crtc]) { + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); + return; + } + + /* Fetch corresponding timestamp for this vblank interval from + * driver and store it in proper slot of timestamp ringbuffer. + */ + + /* Get current timestamp and count. 
*/ + vblcount = atomic_read(&dev->_vblank_count[crtc]); + drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); + + /* Compute time difference to timestamp of last vblank */ + diff_ns = timeval_to_ns(&tvblank) - + timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + + /* Update vblank timestamp and count if at least + * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds + * difference between last stored timestamp and current + * timestamp. A smaller difference means basically + * identical timestamps. Happens if this vblank has + * been already processed and this is a redundant call, + * e.g., due to spurious vblank interrupts. We need to + * ignore those for accounting. + */ + if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { + /* Store new timestamp in ringbuffer. */ + vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; + smp_wmb(); + + /* Increment cooked vblank count. This also atomically commits + * the timestamp computed above. + */ + atomic_inc(&dev->_vblank_count[crtc]); + } else { + DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", + crtc, (int) diff_ns); + } + DRM_WAKEUP(&dev->vbl_queue[crtc]); drm_handle_vblank_events(dev, crtc); + + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); } EXPORT_SYMBOL(drm_handle_vblank); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index cdc89ee042cc..d59edc18301f 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -40,12 +40,22 @@ unsigned int drm_debug = 0; /* 1 to enable debug output */ EXPORT_SYMBOL(drm_debug); +unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ +EXPORT_SYMBOL(drm_vblank_offdelay); + +unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ +EXPORT_SYMBOL(drm_timestamp_precision); + MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); +MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); +MODULE_PARM_DESC(timestamp_precision_usec, "Max. 
error on timestamps [usecs]"); module_param_named(debug, drm_debug, int, 0600); +module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); +module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); struct idr drm_minors_idr; diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 23fa82d667d6..b1d8941e04d8 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -5,12 +5,13 @@ ccflags-y := -Iinclude/drm nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ - nouveau_sgdma.o nouveau_dma.o \ + nouveau_sgdma.o nouveau_dma.o nouveau_util.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ nouveau_dp.o nouveau_ramht.o \ nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ + nouveau_mm.o nouveau_vm.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ @@ -18,14 +19,16 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o nvc0_graph.o \ nv40_grctx.o nv50_grctx.o \ + nv84_crypt.o \ nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ - nv50_crtc.o nv50_dac.o nv50_sor.o \ + nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ nv50_cursor.o nv50_display.o nv50_fbcon.o \ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ - nv04_pm.o nv50_pm.o nva3_pm.o + nv04_pm.o nv50_pm.o nva3_pm.o \ + nv50_vram.o nv50_vm.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index b2293576f278..d3046559bf05 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb) return entry; } -static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads) +static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c, + int heads, int or) { struct dcb_entry *entry = new_dcb_entry(dcb); - entry->type = 0; + entry->type = type; entry->i2c_index = i2c; entry->heads = heads; - entry->location = DCB_LOC_ON_CHIP; - entry->or = 1; -} - -static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads) -{ - struct dcb_entry *entry = new_dcb_entry(dcb); - - entry->type = 2; - entry->i2c_index = LEGACY_I2C_PANEL; - entry->heads = twoHeads ? 3 : 1; - entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ - entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */ - entry->duallink_possible = false; /* SiI164 and co. are single link */ - -#if 0 - /* - * For dvi-a either crtc probably works, but my card appears to only - * support dvi-d. "nvidia" still attempts to program it for dvi-a, - * doing the full fp output setup (program 0x6808.. fp dimension regs, - * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880); - * the monitor picks up the mode res ok and lights up, but no pixel - * data appears, so the board manufacturer probably connected up the - * sync lines, but missed the video traces / components - * - * with this introduction, dvi-a left as an exercise for the reader. 
- */ - fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads); -#endif -} - -static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads) -{ - struct dcb_entry *entry = new_dcb_entry(dcb); - - entry->type = 1; - entry->i2c_index = LEGACY_I2C_TV; - entry->heads = twoHeads ? 3 : 1; - entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ + if (type != OUTPUT_ANALOG) + entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ + entry->or = or; } static bool @@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) return true; } +static void +fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios) +{ + struct dcb_table *dcb = &bios->dcb; + int all_heads = (nv_two_heads(dev) ? 3 : 1); + +#ifdef __powerpc__ + /* Apple iMac G4 NV17 */ + if (of_machine_is_compatible("PowerMac4,5")) { + fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1); + fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2); + return; + } +#endif + + /* Make up some sane defaults */ + fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1); + + if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) + fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV, + all_heads, 0); + + else if (bios->tmds.output0_script_ptr || + bios->tmds.output1_script_ptr) + fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL, + all_heads, 1); +} + static int -parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) +parse_dcb_table(struct drm_device *dev, struct nvbios *bios) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct dcb_table *dcb = &bios->dcb; @@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) /* this situation likely means a really old card, pre DCB */ if (dcbptr == 0x0) { - NV_INFO(dev, "Assuming a CRT output exists\n"); - fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); - - if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) - fabricate_tv_output(dcb, twoHeads); - + fabricate_dcb_encoder_table(dev, bios); return 0; } @@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) */ NV_TRACEWARN(dev, "No useful information in BIOS output table; " "adding all possible outputs\n"); - fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); - - /* - * Attempt to detect TV before DVI because the test - * for the former is more accurate and it rules the - * latter out. 
- */ - if (nv04_tv_identify(dev, - bios->legacy.i2c_indices.tv) >= 0) - fabricate_tv_output(dcb, twoHeads); - - else if (bios->tmds.output0_script_ptr || - bios->tmds.output1_script_ptr) - fabricate_dvi_i_output(dcb, twoHeads); - + fabricate_dcb_encoder_table(dev, bios); return 0; } @@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev) if (ret) return ret; - ret = parse_dcb_table(dev, bios, nv_two_heads(dev)); + ret = parse_dcb_table(dev, bios); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c41e1c200ef5..42d1ad62b381 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -32,6 +32,8 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_mm.h" +#include "nouveau_vm.h" #include <linux/log2.h> #include <linux/slab.h> @@ -46,82 +48,51 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) if (unlikely(nvbo->gem)) DRM_ERROR("bo %p still attached to GEM object\n", bo); - if (nvbo->tile) - nv10_mem_expire_tiling(dev, nvbo->tile, NULL); - + nv10_mem_put_tile_region(dev, nvbo->tile, NULL); + nouveau_vm_put(&nvbo->vma); kfree(nvbo); } static void -nouveau_bo_fixup_align(struct drm_device *dev, - uint32_t tile_mode, uint32_t tile_flags, - int *align, int *size) +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size, + int *page_shift) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - - /* - * Some of the tile_flags have a periodic structure of N*4096 bytes, - * align to to that as well as the page size. Align the size to the - * appropriate boundaries. This does imply that sizes are rounded up - * 3-7 pages, so be aware of this and do not waste memory by allocating - * many small buffers. - */ - if (dev_priv->card_type == NV_50) { - uint32_t block_size = dev_priv->vram_size >> 15; - int i; - - switch (tile_flags) { - case 0x1800: - case 0x2800: - case 0x4800: - case 0x7a00: - if (is_power_of_2(block_size)) { - for (i = 1; i < 10; i++) { - *align = 12 * i * block_size; - if (!(*align % 65536)) - break; - } - } else { - for (i = 1; i < 10; i++) { - *align = 8 * i * block_size; - if (!(*align % 65536)) - break; - } - } - *size = roundup(*size, *align); - break; - default: - break; - } + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); - } else { - if (tile_mode) { + if (dev_priv->card_type < NV_50) { + if (nvbo->tile_mode) { if (dev_priv->chipset >= 0x40) { *align = 65536; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); } else if (dev_priv->chipset >= 0x30) { *align = 32768; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); } else if (dev_priv->chipset >= 0x20) { *align = 16384; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); } else if (dev_priv->chipset >= 0x10) { *align = 16384; - *size = roundup(*size, 32 * tile_mode); + *size = roundup(*size, 32 * nvbo->tile_mode); } } + } else { + if (likely(dev_priv->chan_vm)) { + if (*size > 256 * 1024) + *page_shift = dev_priv->chan_vm->lpg_shift; + else + *page_shift = dev_priv->chan_vm->spg_shift; + } else { + *page_shift = 12; + } + + *size = roundup(*size, (1 << *page_shift)); + *align = max((1 << *page_shift), *align); } - /* ALIGN works only on powers of two. 
*/ *size = roundup(*size, PAGE_SIZE); - - if (dev_priv->card_type == NV_50) { - *size = roundup(*size, 65536); - *align = max(65536, *align); - } } int @@ -132,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *nvbo; - int ret = 0; + int ret = 0, page_shift = 0; nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); if (!nvbo) @@ -145,10 +116,18 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nvbo->tile_flags = tile_flags; nvbo->bo.bdev = &dev_priv->ttm.bdev; - nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), - &align, &size); + nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift); align >>= PAGE_SHIFT; + if (!nvbo->no_vm && dev_priv->chan_vm) { + ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift, + NV_MEM_ACCESS_RW, &nvbo->vma); + if (ret) { + kfree(nvbo); + return ret; + } + } + nouveau_bo_placement_set(nvbo, flags, 0); nvbo->channel = chan; @@ -161,6 +140,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, } nvbo->channel = NULL; + if (nvbo->vma.node) { + if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) + nvbo->bo.offset = nvbo->vma.offset; + } + *pnvbo = nvbo; return 0; } @@ -244,7 +228,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) nouveau_bo_placement_set(nvbo, memtype, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -280,7 +264,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -319,6 +303,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo) ttm_bo_kunmap(&nvbo->kmap); } +int +nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu) +{ + int ret; + + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, + no_wait_reserve, no_wait_gpu); + if (ret) + return ret; + + if (nvbo->vma.node) { + if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) + nvbo->bo.offset = nvbo->vma.offset; + } + + return 0; +} + u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) { @@ -410,37 +413,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: - man->func = &ttm_bo_manager_func; + if (dev_priv->card_type == NV_50) { + man->func = &nouveau_vram_manager; + man->io_reserve_fastpath = false; + man->use_io_reserve_lru = true; + } else { + man->func = &ttm_bo_manager_func; + } man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - if (dev_priv->card_type == NV_50) - man->gpu_offset = 0x40000000; - else - man->gpu_offset = 0; break; case TTM_PL_TT: man->func = &ttm_bo_manager_func; switch (dev_priv->gart_info.type) { case NOUVEAU_GART_AGP: man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; break; case NOUVEAU_GART_SGDMA: man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 
man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; + man->gpu_offset = dev_priv->gart_info.aper_base; break; default: NV_ERROR(dev, "Unknown GART type: %d\n", dev_priv->gart_info.type); return -EINVAL; } - man->gpu_offset = dev_priv->vm_gart_base; break; default: NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); @@ -485,16 +491,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, if (ret) return ret; - if (nvbo->channel) { - ret = nouveau_fence_sync(fence, nvbo->channel); - if (ret) - goto out; - } - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); -out: - nouveau_fence_unref((void *)&fence); + nouveau_fence_unref(&fence); return ret; } @@ -529,14 +528,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, dst_offset = new_mem->start << PAGE_SHIFT; if (!nvbo->no_vm) { if (old_mem->mem_type == TTM_PL_VRAM) - src_offset += dev_priv->vm_vram_base; + src_offset = nvbo->vma.offset; else - src_offset += dev_priv->vm_gart_base; + src_offset += dev_priv->gart_info.aper_base; if (new_mem->mem_type == TTM_PL_VRAM) - dst_offset += dev_priv->vm_vram_base; + dst_offset = nvbo->vma.offset; else - dst_offset += dev_priv->vm_gart_base; + dst_offset += dev_priv->gart_info.aper_base; } ret = RING_SPACE(chan, 3); @@ -683,17 +682,24 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, int ret; chan = nvbo->channel; - if (!chan || nvbo->no_vm) + if (!chan || nvbo->no_vm) { chan = dev_priv->channel; + mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); + } if (dev_priv->card_type < NV_50) ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); else ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); - if (ret) - return ret; + if (ret == 0) { + ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, + no_wait_reserve, + no_wait_gpu, new_mem); + } - return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); + if (chan == dev_priv->channel) + mutex_unlock(&chan->mutex); + return ret; } static int @@ -771,7 +777,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); uint64_t offset; - int ret; if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { /* Nothing to do. 
*/ @@ -781,18 +786,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, offset = new_mem->start << PAGE_SHIFT; - if (dev_priv->card_type == NV_50) { - ret = nv50_mem_vm_bind_linear(dev, - offset + dev_priv->vm_vram_base, - new_mem->size, - nouveau_bo_tile_layout(nvbo), - offset); - if (ret) - return ret; - + if (dev_priv->chan_vm) { + nouveau_vm_map(&nvbo->vma, new_mem->mm_node); } else if (dev_priv->card_type >= NV_10) { *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, - nvbo->tile_mode); + nvbo->tile_mode, + nvbo->tile_flags); } return 0; @@ -808,9 +807,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, if (dev_priv->card_type >= NV_10 && dev_priv->card_type < NV_50) { - if (*old_tile) - nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj); - + nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj); *old_tile = new_tile; } } @@ -879,6 +876,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); struct drm_device *dev = dev_priv->dev; + int ret; mem->bus.addr = NULL; mem->bus.offset = 0; @@ -901,9 +899,32 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) #endif break; case TTM_PL_VRAM: - mem->bus.offset = mem->start << PAGE_SHIFT; + { + struct nouveau_vram *vram = mem->mm_node; + + if (!dev_priv->bar1_vm) { + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = pci_resource_start(dev->pdev, 1); + mem->bus.is_iomem = true; + break; + } + + ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12, + NV_MEM_ACCESS_RW, &vram->bar_vma); + if (ret) + return ret; + + nouveau_vm_map(&vram->bar_vma, vram); + if (ret) { + nouveau_vm_put(&vram->bar_vma); + return ret; + } + + mem->bus.offset = vram->bar_vma.offset; + mem->bus.offset -= 0x0020000000ULL; mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; + } break; default: return -EINVAL; @@ -914,6 +935,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) static void nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { + struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); + struct nouveau_vram *vram = mem->mm_node; + + if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM) + return; + + if (!vram->bar_vma.node) + return; + + nouveau_vm_unmap(&vram->bar_vma); + nouveau_vm_put(&vram->bar_vma); } static int @@ -939,7 +971,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) nvbo->placement.fpfn = 0; nvbo->placement.lpfn = dev_priv->fb_mappable_pages; nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); - return ttm_bo_validate(bo, &nvbo->placement, false, true, false); + return nouveau_bo_validate(nvbo, false, true, false); +} + +void +nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) +{ + struct nouveau_fence *old_fence; + + if (likely(fence)) + nouveau_fence_ref(fence); + + spin_lock(&nvbo->bo.bdev->fence_lock); + old_fence = nvbo->bo.sync_obj; + nvbo->bo.sync_obj = fence; + spin_unlock(&nvbo->bo.bdev->fence_lock); + + nouveau_fence_unref(&old_fence); } struct ttm_bo_driver nouveau_bo_driver = { @@ -949,11 +997,11 @@ struct ttm_bo_driver nouveau_bo_driver = { .evict_flags = nouveau_bo_evict_flags, .move = nouveau_bo_move, .verify_access = nouveau_bo_verify_access, - .sync_obj_signaled = nouveau_fence_signalled, - .sync_obj_wait = nouveau_fence_wait, - .sync_obj_flush = nouveau_fence_flush, - 
.sync_obj_unref = nouveau_fence_unref, - .sync_obj_ref = nouveau_fence_ref, + .sync_obj_signaled = __nouveau_fence_signalled, + .sync_obj_wait = __nouveau_fence_wait, + .sync_obj_flush = __nouveau_fence_flush, + .sync_obj_unref = __nouveau_fence_unref, + .sync_obj_ref = __nouveau_fence_ref, .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = &nouveau_ttm_io_mem_free, diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 373950e34814..6f37995aee2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -39,22 +39,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, - dev_priv->vm_end, NV_DMA_ACCESS_RO, - NV_DMA_TARGET_AGP, &pushbuf); + (1ULL << 40), NV_MEM_ACCESS_RO, + NV_MEM_TARGET_VM, &pushbuf); chan->pushbuf_base = pb->bo.offset; } else if (pb->bo.mem.mem_type == TTM_PL_TT) { - ret = nouveau_gpuobj_gart_dma_new(chan, 0, - dev_priv->gart_info.aper_size, - NV_DMA_ACCESS_RO, &pushbuf, - NULL); + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, + dev_priv->gart_info.aper_size, + NV_MEM_ACCESS_RO, + NV_MEM_TARGET_GART, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, - NV_DMA_ACCESS_RO, - NV_DMA_TARGET_VIDMEM, &pushbuf); + NV_MEM_ACCESS_RO, + NV_MEM_TARGET_VRAM, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's @@ -62,11 +62,10 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) * VRAM. */ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - pci_resource_start(dev->pdev, - 1), + pci_resource_start(dev->pdev, 1), dev_priv->fb_available_size, - NV_DMA_ACCESS_RO, - NV_DMA_TARGET_PCI, &pushbuf); + NV_MEM_ACCESS_RO, + NV_MEM_TARGET_PCI, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } @@ -107,74 +106,60 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) int nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, struct drm_file *file_priv, - uint32_t vram_handle, uint32_t tt_handle) + uint32_t vram_handle, uint32_t gart_handle) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_channel *chan; - int channel, user; + unsigned long flags; int ret; - /* - * Alright, here is the full story - * Nvidia cards have multiple hw fifo contexts (praise them for that, - * no complicated crash-prone context switches) - * We allocate a new context for each app and let it write to it - * directly (woo, full userspace command submission !) 
- * When there are no more contexts, you lost - */ - for (channel = 0; channel < pfifo->channels; channel++) { - if (dev_priv->fifos[channel] == NULL) + /* allocate and lock channel structure */ + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = dev; + chan->file_priv = file_priv; + chan->vram_handle = vram_handle; + chan->gart_handle = gart_handle; + + kref_init(&chan->ref); + atomic_set(&chan->users, 1); + mutex_init(&chan->mutex); + mutex_lock(&chan->mutex); + + /* allocate hw channel id */ + spin_lock_irqsave(&dev_priv->channels.lock, flags); + for (chan->id = 0; chan->id < pfifo->channels; chan->id++) { + if (!dev_priv->channels.ptr[chan->id]) { + nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]); break; + } } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - /* no more fifos. you lost. */ - if (channel == pfifo->channels) - return -EINVAL; + if (chan->id == pfifo->channels) { + mutex_unlock(&chan->mutex); + kfree(chan); + return -ENODEV; + } - dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel), - GFP_KERNEL); - if (!dev_priv->fifos[channel]) - return -ENOMEM; - chan = dev_priv->fifos[channel]; + NV_DEBUG(dev, "initialising channel %d\n", chan->id); INIT_LIST_HEAD(&chan->nvsw.vbl_wait); + INIT_LIST_HEAD(&chan->nvsw.flip); INIT_LIST_HEAD(&chan->fence.pending); - chan->dev = dev; - chan->id = channel; - chan->file_priv = file_priv; - chan->vram_handle = vram_handle; - chan->gart_handle = tt_handle; - - NV_INFO(dev, "Allocating FIFO number %d\n", channel); /* Allocate DMA push buffer */ chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); if (!chan->pushbuf_bo) { ret = -ENOMEM; NV_ERROR(dev, "pushbuf %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } nouveau_dma_pre_init(chan); - - /* Locate channel's user control regs */ - if (dev_priv->card_type < NV_40) - user = NV03_USER(channel); - else - if (dev_priv->card_type < NV_50) - user = NV40_USER(channel); - else - user = NV50_USER(channel); - - chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user, - PAGE_SIZE); - if (!chan->user) { - NV_ERROR(dev, "ioremap of regs failed.\n"); - nouveau_channel_free(chan); - return -ENOMEM; - } chan->user_put = 0x40; chan->user_get = 0x44; @@ -182,15 +167,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ret = nouveau_notifier_init_channel(chan); if (ret) { NV_ERROR(dev, "ntfy %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } /* Setup channel's default objects */ - ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); + ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); if (ret) { NV_ERROR(dev, "gpuobj %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } @@ -198,7 +183,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ret = nouveau_channel_pushbuf_ctxdma_init(chan); if (ret) { NV_ERROR(dev, "pbctxdma %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } @@ -206,16 +191,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, pfifo->reassign(dev, false); /* Create a graphics context for new channel */ - ret = pgraph->create_context(chan); - if (ret) { - nouveau_channel_free(chan); - return ret; + if (dev_priv->card_type < NV_50) { + ret = pgraph->create_context(chan); + if (ret) { + nouveau_channel_put(&chan); + return ret; + } } /* Construct inital 
RAMFC for new channel */ ret = pfifo->create_context(chan); if (ret) { - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } @@ -225,83 +212,108 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, if (!ret) ret = nouveau_fence_channel_init(chan); if (ret) { - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } nouveau_debugfs_channel_init(chan); - NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel); + NV_DEBUG(dev, "channel %d initialised\n", chan->id); *chan_ret = chan; return 0; } -/* stops a fifo */ +struct nouveau_channel * +nouveau_channel_get_unlocked(struct nouveau_channel *ref) +{ + struct nouveau_channel *chan = NULL; + + if (likely(ref && atomic_inc_not_zero(&ref->users))) + nouveau_channel_ref(ref, &chan); + + return chan; +} + +struct nouveau_channel * +nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + + if (unlikely(!chan)) + return ERR_PTR(-EINVAL); + + if (unlikely(file_priv && chan->file_priv != file_priv)) { + nouveau_channel_put_unlocked(&chan); + return ERR_PTR(-EINVAL); + } + + mutex_lock(&chan->mutex); + return chan; +} + void -nouveau_channel_free(struct nouveau_channel *chan) +nouveau_channel_put_unlocked(struct nouveau_channel **pchan) { + struct nouveau_channel *chan = *pchan; struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; unsigned long flags; - int ret; - NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id); + /* decrement the refcount, and we're done if there's still refs */ + if (likely(!atomic_dec_and_test(&chan->users))) { + nouveau_channel_ref(NULL, pchan); + return; + } + /* noone wants the channel anymore */ + NV_DEBUG(dev, "freeing channel %d\n", chan->id); nouveau_debugfs_channel_fini(chan); - /* Give outstanding push buffers a chance to complete */ - nouveau_fence_update(chan); - if (chan->fence.sequence != chan->fence.sequence_ack) { - struct nouveau_fence *fence = NULL; - - ret = nouveau_fence_new(chan, &fence, true); - if (ret == 0) { - ret = nouveau_fence_wait(fence, NULL, false, false); - nouveau_fence_unref((void *)&fence); - } - - if (ret) - NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); - } + /* give it chance to idle */ + nouveau_channel_idle(chan); - /* Ensure all outstanding fences are signaled. They should be if the + /* ensure all outstanding fences are signaled. they should be if the * above attempts at idling were OK, but if we failed this'll tell TTM * we're done with the buffers. */ nouveau_fence_channel_fini(chan); - /* This will prevent pfifo from switching channels. */ + /* boot it off the hardware */ pfifo->reassign(dev, false); - /* We want to give pgraph a chance to idle and get rid of all potential - * errors. We need to do this before the lock, otherwise the irq handler - * is unable to process them. + /* We want to give pgraph a chance to idle and get rid of all + * potential errors. 
We need to do this without the context + * switch lock held, otherwise the irq handler is unable to + * process them. */ if (pgraph->channel(dev) == chan) nouveau_wait_for_idle(dev); - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - - pgraph->fifo_access(dev, false); - if (pgraph->channel(dev) == chan) - pgraph->unload_context(dev); - pgraph->destroy_context(chan); - pgraph->fifo_access(dev, true); - - if (pfifo->channel_id(dev) == chan->id) { - pfifo->disable(dev); - pfifo->unload_context(dev); - pfifo->enable(dev); - } + /* destroy the engine specific contexts */ pfifo->destroy_context(chan); + pgraph->destroy_context(chan); + if (pcrypt->destroy_context) + pcrypt->destroy_context(chan); pfifo->reassign(dev, true); - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + /* aside from its resources, the channel should now be dead, + * remove it from the channel list + */ + spin_lock_irqsave(&dev_priv->channels.lock, flags); + nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]); + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - /* Release the channel's resources */ + /* destroy any resources the channel owned */ nouveau_gpuobj_ref(NULL, &chan->pushbuf); if (chan->pushbuf_bo) { nouveau_bo_unmap(chan->pushbuf_bo); @@ -310,44 +322,80 @@ nouveau_channel_free(struct nouveau_channel *chan) } nouveau_gpuobj_channel_takedown(chan); nouveau_notifier_takedown_channel(chan); - if (chan->user) - iounmap(chan->user); - dev_priv->fifos[chan->id] = NULL; + nouveau_channel_ref(NULL, pchan); +} + +void +nouveau_channel_put(struct nouveau_channel **pchan) +{ + mutex_unlock(&(*pchan)->mutex); + nouveau_channel_put_unlocked(pchan); +} + +static void +nouveau_channel_del(struct kref *ref) +{ + struct nouveau_channel *chan = + container_of(ref, struct nouveau_channel, ref); + kfree(chan); } +void +nouveau_channel_ref(struct nouveau_channel *chan, + struct nouveau_channel **pchan) +{ + if (chan) + kref_get(&chan->ref); + + if (*pchan) + kref_put(&(*pchan)->ref, nouveau_channel_del); + + *pchan = chan; +} + +void +nouveau_channel_idle(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct nouveau_fence *fence = NULL; + int ret; + + nouveau_fence_update(chan); + + if (chan->fence.sequence != chan->fence.sequence_ack) { + ret = nouveau_fence_new(chan, &fence, true); + if (!ret) { + ret = nouveau_fence_wait(fence, false, false); + nouveau_fence_unref(&fence); + } + + if (ret) + NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); + } +} + /* cleans up all the fifos from file_priv */ void nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_channel *chan; int i; NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); for (i = 0; i < engine->fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; + chan = nouveau_channel_get(dev, file_priv, i); + if (IS_ERR(chan)) + continue; - if (chan && chan->file_priv == file_priv) - nouveau_channel_free(chan); + atomic_dec(&chan->users); + nouveau_channel_put(&chan); } } -int -nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv, - int channel) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - - if (channel >= engine->fifo.channels) - return 0; - if (dev_priv->fifos[channel] == NULL) - return 0; - - return (dev_priv->fifos[channel]->file_priv == file_priv); 
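
The channel rework in the hunks above replaces the old dev_priv->fifos[] array and nouveau_channel_free() with a two-counter lifetime scheme: a kref keeps the structure's memory alive, while a separate atomic `users` count tracks holders of the hardware context, which is torn down when `users` reaches zero even if stale struct references remain. Lookups guard against racing teardown with atomic_inc_not_zero(). A minimal sketch of that pattern, using illustrative names rather than the driver's actual structures:

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct chan {
	struct kref ref;	/* guards the memory */
	atomic_t users;		/* guards the hardware context */
};

static void chan_del(struct kref *ref)
{
	kfree(container_of(ref, struct chan, ref));
}

/* Lookup side: succeeds only while the hardware context is alive, so a
 * dying channel cannot be revived by a racing lookup. */
static struct chan *chan_get(struct chan *c)
{
	if (c && atomic_inc_not_zero(&c->users)) {
		kref_get(&c->ref);
		return c;
	}
	return NULL;
}

static void chan_put(struct chan *c)
{
	if (atomic_dec_and_test(&c->users)) {
		/* last user gone: the hardware context is torn down here */
	}
	kref_put(&c->ref, chan_del);
}

This is why nouveau_channel_owner() can be deleted: ownership is now checked inside nouveau_channel_get(), which fails for channels belonging to another file_priv.
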
-} /*********************************** * ioctls wrapping the functions @@ -395,24 +443,26 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, /* Named memory object area */ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, &init->notifier_handle); - if (ret) { - nouveau_channel_free(chan); - return ret; - } - return 0; + if (ret == 0) + atomic_inc(&chan->users); /* userspace reference */ + nouveau_channel_put(&chan); + return ret; } static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_channel_free *cfree = data; + struct drm_nouveau_channel_free *req = data; struct nouveau_channel *chan; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, req->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); - nouveau_channel_free(chan); + atomic_dec(&chan->users); + nouveau_channel_put(&chan); return 0; } @@ -421,18 +471,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, ***********************************/ struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), }; int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 52c356e9a3d1..a21e00076839 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -37,6 +37,8 @@ #include 
"nouveau_connector.h" #include "nouveau_hw.h" +static void nouveau_connector_hotplug(void *, int); + static struct nouveau_encoder * find_encoder_by_type(struct drm_connector *connector, int type) { @@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector) } static void -nouveau_connector_destroy(struct drm_connector *drm_connector) +nouveau_connector_destroy(struct drm_connector *connector) { - struct nouveau_connector *nv_connector = - nouveau_connector(drm_connector); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_nouveau_private *dev_priv; + struct nouveau_gpio_engine *pgpio; struct drm_device *dev; if (!nv_connector) return; dev = nv_connector->base.dev; + dev_priv = dev->dev_private; NV_DEBUG_KMS(dev, "\n"); + pgpio = &dev_priv->engine.gpio; + if (pgpio->irq_unregister) { + pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag, + nouveau_connector_hotplug, connector); + } + kfree(nv_connector->edid); - drm_sysfs_connector_remove(drm_connector); - drm_connector_cleanup(drm_connector); - kfree(drm_connector); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); } static struct nouveau_i2c_chan * @@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index) { const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; struct nouveau_connector *nv_connector = NULL; struct dcb_connector_table_entry *dcb = NULL; struct drm_connector *connector; @@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index) break; } + if (pgpio->irq_register) { + pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, + nouveau_connector_hotplug, connector); + } + drm_sysfs_connector_add(connector); dcb->drm = connector; return dcb->drm; @@ -886,3 +902,29 @@ fail: return ERR_PTR(ret); } + +static void +nouveau_connector_hotplug(void *data, int plugged) +{ + struct drm_connector *connector = data; + struct drm_device *dev = connector->dev; + + NV_INFO(dev, "%splugged %s\n", plugged ? 
"" : "un", + drm_get_connector_name(connector)); + + if (connector->encoder && connector->encoder->crtc && + connector->encoder->crtc->enabled) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder); + struct drm_encoder_helper_funcs *helper = + connector->encoder->helper_private; + + if (nv_encoder->dcb->type == OUTPUT_DP) { + if (plugged) + helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); + else + helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); + } + } + + drm_helper_hpd_irq_event(dev); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 2e11fd65b4dd..505c6bfb4d75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -29,6 +29,9 @@ #include "nouveau_drv.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" +#include "nouveau_hw.h" +#include "nouveau_crtc.h" +#include "nouveau_dma.h" static void nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) @@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = { .output_poll_changed = nouveau_fbcon_output_poll_changed, }; +int +nouveau_vblank_enable(struct drm_device *dev, int crtc) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->card_type >= NV_50) + nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, + NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc)); + else + NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, + NV_PCRTC_INTR_0_VBLANK); + + return 0; +} + +void +nouveau_vblank_disable(struct drm_device *dev, int crtc) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->card_type >= NV_50) + nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, + NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0); + else + NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0); +} + +static int +nouveau_page_flip_reserve(struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo) +{ + int ret; + + ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); + if (ret) + goto fail; + + ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); + if (ret) + goto fail_unreserve; + + return 0; + +fail_unreserve: + ttm_bo_unreserve(&new_bo->bo); +fail: + nouveau_bo_unpin(new_bo); + return ret; +} + +static void +nouveau_page_flip_unreserve(struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo, + struct nouveau_fence *fence) +{ + nouveau_bo_fence(new_bo, fence); + ttm_bo_unreserve(&new_bo->bo); + + nouveau_bo_fence(old_bo, fence); + ttm_bo_unreserve(&old_bo->bo); + + nouveau_bo_unpin(old_bo); +} + +static int +nouveau_page_flip_emit(struct nouveau_channel *chan, + struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo, + struct nouveau_page_flip_state *s, + struct nouveau_fence **pfence) +{ + struct drm_device *dev = chan->dev; + unsigned long flags; + int ret; + + /* Queue it to the pending list */ + spin_lock_irqsave(&dev->event_lock, flags); + list_add_tail(&s->head, &chan->nvsw.flip); + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* Synchronize with the old framebuffer */ + ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan); + if (ret) + goto fail; + + /* Emit the pageflip */ + ret = RING_SPACE(chan, 2); + if (ret) + goto fail; + + BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); + OUT_RING(chan, 0); + FIRE_RING(chan); + + ret = nouveau_fence_new(chan, pfence, true); + if (ret) + goto fail; + + return 0; +fail: + spin_lock_irqsave(&dev->event_lock, flags); + list_del(&s->head); + 
spin_unlock_irqrestore(&dev->event_lock, flags); + return ret; +} + +int +nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; + struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; + struct nouveau_page_flip_state *s; + struct nouveau_channel *chan; + struct nouveau_fence *fence; + int ret; + + if (dev_priv->engine.graph.accel_blocked) + return -ENODEV; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + /* Don't let the buffers go away while we flip */ + ret = nouveau_page_flip_reserve(old_bo, new_bo); + if (ret) + goto fail_free; + + /* Initialize a page flip struct */ + *s = (struct nouveau_page_flip_state) + { { }, s->event, nouveau_crtc(crtc)->index, + fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y, + new_bo->bo.offset }; + + /* Choose the channel the flip will be handled in */ + chan = nouveau_fence_channel(new_bo->bo.sync_obj); + if (!chan) + chan = nouveau_channel_get_unlocked(dev_priv->channel); + mutex_lock(&chan->mutex); + + /* Emit a page flip */ + ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); + nouveau_channel_put(&chan); + if (ret) + goto fail_unreserve; + + /* Update the crtc struct and cleanup */ + crtc->fb = fb; + + nouveau_page_flip_unreserve(old_bo, new_bo, fence); + nouveau_fence_unref(&fence); + return 0; + +fail_unreserve: + nouveau_page_flip_unreserve(old_bo, new_bo, NULL); +fail_free: + kfree(s); + return ret; +} + +int +nouveau_finish_page_flip(struct nouveau_channel *chan, + struct nouveau_page_flip_state *ps) +{ + struct drm_device *dev = chan->dev; + struct nouveau_page_flip_state *s; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + + if (list_empty(&chan->nvsw.flip)) { + NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); + spin_unlock_irqrestore(&dev->event_lock, flags); + return -EINVAL; + } + + s = list_first_entry(&chan->nvsw.flip, + struct nouveau_page_flip_state, head); + if (s->event) { + struct drm_pending_vblank_event *e = s->event; + struct timeval now; + + do_gettimeofday(&now); + e->event.sequence = 0; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } + + list_del(&s->head); + *ps = *s; + kfree(s); + + spin_unlock_irqrestore(&dev->event_lock, flags); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 82581e600dcd..6ff77cedc008 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -59,17 +59,11 @@ nouveau_dma_init(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *obj = NULL; int ret, i; /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ - ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? - 0x0039 : 0x5039, &obj); - if (ret) - return ret; - - ret = nouveau_ramht_insert(chan, NvM2MF, obj); - nouveau_gpuobj_ref(NULL, &obj); + ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ? 
+ 0x0039 : 0x5039); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 4562f309ae3d..38d599554bce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder) struct bit_displayport_encoder_table *dpe; int dpe_headerlen; uint8_t config[4], status[3]; - bool cr_done, cr_max_vs, eq_done; + bool cr_done, cr_max_vs, eq_done, hpd_state; int ret = 0, i, tries, voltage; NV_DEBUG_KMS(dev, "link training!!\n"); @@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder) /* disable hotplug detect, this flips around on some panels during * link training. */ - pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); + hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); if (dpe->script0) { NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); @@ -439,7 +439,7 @@ stop: } /* re-enable hotplug detect */ - pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); + pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state); return eq_done; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 90875494a65a..bb170570938b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n"); int nouveau_perflvl_wr; module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); +MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); +int nouveau_msi; +module_param_named(msi, nouveau_msi, int, 0400); + int nouveau_fbpercrtc; #if 0 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); @@ -193,23 +197,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) NV_INFO(dev, "Idling channels...\n"); for (i = 0; i < pfifo->channels; i++) { - struct nouveau_fence *fence = NULL; - - chan = dev_priv->fifos[i]; - if (!chan || (dev_priv->card_type >= NV_50 && - chan == dev_priv->fifos[0])) - continue; - - ret = nouveau_fence_new(chan, &fence, true); - if (ret == 0) { - ret = nouveau_fence_wait(fence, NULL, false, false); - nouveau_fence_unref((void *)&fence); - } + chan = dev_priv->channels.ptr[i]; - if (ret) { - NV_ERROR(dev, "Failed to idle channel %d for suspend\n", - chan->id); - } + if (chan && chan->pushbuf_bo) + nouveau_channel_idle(chan); } pgraph->fifo_access(dev, false); @@ -219,17 +210,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) pfifo->unload_context(dev); pgraph->unload_context(dev); - NV_INFO(dev, "Suspending GPU objects...\n"); - ret = nouveau_gpuobj_suspend(dev); + ret = pinstmem->suspend(dev); if (ret) { NV_ERROR(dev, "... failed: %d\n", ret); goto out_abort; } - ret = pinstmem->suspend(dev); + NV_INFO(dev, "Suspending GPU objects...\n"); + ret = nouveau_gpuobj_suspend(dev); if (ret) { NV_ERROR(dev, "... 
failed: %d\n", ret); - nouveau_gpuobj_suspend_cleanup(dev); + pinstmem->resume(dev); goto out_abort; } @@ -294,17 +285,18 @@ nouveau_pci_resume(struct pci_dev *pdev) } } + NV_INFO(dev, "Restoring GPU objects...\n"); + nouveau_gpuobj_resume(dev); + NV_INFO(dev, "Reinitialising engines...\n"); engine->instmem.resume(dev); engine->mc.init(dev); engine->timer.init(dev); engine->fb.init(dev); engine->graph.init(dev); + engine->crypt.init(dev); engine->fifo.init(dev); - NV_INFO(dev, "Restoring GPU objects...\n"); - nouveau_gpuobj_resume(dev); - nouveau_irq_postinstall(dev); /* Re-write SKIPS, they'll have been lost over the suspend */ @@ -313,7 +305,7 @@ nouveau_pci_resume(struct pci_dev *pdev) int j; for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - chan = dev_priv->fifos[i]; + chan = dev_priv->channels.ptr[i]; if (!chan || !chan->pushbuf_bo) continue; @@ -347,13 +339,11 @@ nouveau_pci_resume(struct pci_dev *pdev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT; - nv_crtc->cursor.set_offset(nv_crtc, - nv_crtc->cursor.nvbo->bo.offset - - dev_priv->vm_vram_base); - + nv_crtc->cursor.set_offset(nv_crtc, offset); nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, - nv_crtc->cursor_saved_y); + nv_crtc->cursor_saved_y); } /* Force CLUT to get re-loaded during modeset */ @@ -393,6 +383,9 @@ static struct drm_driver driver = { .irq_postinstall = nouveau_irq_postinstall, .irq_uninstall = nouveau_irq_uninstall, .irq_handler = nouveau_irq_handler, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = nouveau_vblank_enable, + .disable_vblank = nouveau_vblank_disable, .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = nouveau_ioctls, .fops = { @@ -403,6 +396,7 @@ static struct drm_driver driver = { .mmap = nouveau_ttm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #if defined(CONFIG_COMPAT) .compat_ioctl = nouveau_compat_ioctl, #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 1c7db64c03bf..8f13906185b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -54,22 +54,36 @@ struct nouveau_fpriv { #include "nouveau_drm.h" #include "nouveau_reg.h" #include "nouveau_bios.h" +#include "nouveau_util.h" + struct nouveau_grctx; +struct nouveau_vram; +#include "nouveau_vm.h" #define MAX_NUM_DCB_ENTRIES 16 #define NOUVEAU_MAX_CHANNEL_NR 128 #define NOUVEAU_MAX_TILE_NR 15 -#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) -#define NV50_VM_BLOCK (512*1024*1024ULL) -#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) +struct nouveau_vram { + struct drm_device *dev; + + struct nouveau_vma bar_vma; + + struct list_head regions; + u32 memtype; + u64 offset; + u64 size; +}; struct nouveau_tile_reg { - struct nouveau_fence *fence; - uint32_t addr; - uint32_t size; bool used; + uint32_t addr; + uint32_t limit; + uint32_t pitch; + uint32_t zcomp; + struct drm_mm_node *tag_mem; + struct nouveau_fence *fence; }; struct nouveau_bo { @@ -88,6 +102,7 @@ struct nouveau_bo { struct nouveau_channel *channel; + struct nouveau_vma vma; bool mappable; bool no_vm; @@ -96,7 +111,6 @@ struct nouveau_bo { struct nouveau_tile_reg *tile; struct drm_gem_object *gem; - struct drm_file *cpu_filp; int pin_refcnt; }; @@ -133,20 +147,28 @@ enum nouveau_flags { #define NVOBJ_ENGINE_SW 0 #define NVOBJ_ENGINE_GR 1 -#define NVOBJ_ENGINE_DISPLAY 2 +#define NVOBJ_ENGINE_PPP 2 +#define 
NVOBJ_ENGINE_COPY 3 +#define NVOBJ_ENGINE_VP 4 +#define NVOBJ_ENGINE_CRYPT 5 +#define NVOBJ_ENGINE_BSP 6 +#define NVOBJ_ENGINE_DISPLAY 0xcafe0001 #define NVOBJ_ENGINE_INT 0xdeadbeef +#define NVOBJ_FLAG_DONT_MAP (1 << 0) #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) +#define NVOBJ_FLAG_VM (1 << 3) + +#define NVOBJ_CINST_GLOBAL 0xdeadbeef + struct nouveau_gpuobj { struct drm_device *dev; struct kref refcount; struct list_head list; - struct drm_mm_node *im_pramin; - struct nouveau_bo *im_backing; - uint32_t *im_backing_suspend; - int im_bound; + void *node; + u32 *suspend; uint32_t flags; @@ -162,10 +184,29 @@ struct nouveau_gpuobj { void *priv; }; +struct nouveau_page_flip_state { + struct list_head head; + struct drm_pending_vblank_event *event; + int crtc, bpp, pitch, x, y; + uint64_t offset; +}; + +enum nouveau_channel_mutex_class { + NOUVEAU_UCHANNEL_MUTEX, + NOUVEAU_KCHANNEL_MUTEX +}; + struct nouveau_channel { struct drm_device *dev; int id; + /* references to the channel data structure */ + struct kref ref; + /* users of the hardware channel resources, the hardware + * context will be kicked off when it reaches zero. */ + atomic_t users; + struct mutex mutex; + /* owner of this fifo */ struct drm_file *file_priv; /* mapping of the fifo itself */ @@ -202,12 +243,12 @@ struct nouveau_channel { /* PGRAPH context */ /* XXX may be merge 2 pointers as private data ??? */ struct nouveau_gpuobj *ramin_grctx; + struct nouveau_gpuobj *crypt_ctx; void *pgraph_ctx; /* NV50 VM */ + struct nouveau_vm *vm; struct nouveau_gpuobj *vm_pd; - struct nouveau_gpuobj *vm_gart_pt; - struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; /* Objects */ struct nouveau_gpuobj *ramin; /* Private instmem */ @@ -238,9 +279,11 @@ struct nouveau_channel { struct { struct nouveau_gpuobj *vblsem; + uint32_t vblsem_head; uint32_t vblsem_offset; uint32_t vblsem_rval; struct list_head vbl_wait; + struct list_head flip; } nvsw; struct { @@ -258,11 +301,11 @@ struct nouveau_instmem_engine { int (*suspend)(struct drm_device *dev); void (*resume)(struct drm_device *dev); - int (*populate)(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); - void (*clear)(struct drm_device *, struct nouveau_gpuobj *); - int (*bind)(struct drm_device *, struct nouveau_gpuobj *); - int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); + int (*get)(struct nouveau_gpuobj *, u32 size, u32 align); + void (*put)(struct nouveau_gpuobj *); + int (*map)(struct nouveau_gpuobj *); + void (*unmap)(struct nouveau_gpuobj *); + void (*flush)(struct drm_device *); }; @@ -279,12 +322,17 @@ struct nouveau_timer_engine { struct nouveau_fb_engine { int num_tiles; + struct drm_mm tag_heap; + void *priv; int (*init)(struct drm_device *dev); void (*takedown)(struct drm_device *dev); - void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch); + void (*init_tile_region)(struct drm_device *dev, int i, + uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); + void (*set_tile_region)(struct drm_device *dev, int i); + void (*free_tile_region)(struct drm_device *dev, int i); }; struct nouveau_fifo_engine { @@ -310,21 +358,9 @@ struct nouveau_fifo_engine { void (*tlb_flush)(struct drm_device *dev); }; -struct nouveau_pgraph_object_method { - int id; - int (*exec)(struct nouveau_channel *chan, int grclass, int mthd, - uint32_t data); -}; - -struct nouveau_pgraph_object_class { - int id; - bool software; - struct nouveau_pgraph_object_method *methods; -}; - struct 
nouveau_pgraph_engine { - struct nouveau_pgraph_object_class *grclass; bool accel_blocked; + bool registered; int grctx_size; /* NV2x/NV3x context table (0x400780) */ @@ -342,8 +378,7 @@ struct nouveau_pgraph_engine { int (*unload_context)(struct drm_device *); void (*tlb_flush)(struct drm_device *dev); - void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch); + void (*set_tile_region)(struct drm_device *dev, int i); }; struct nouveau_display_engine { @@ -355,13 +390,19 @@ struct nouveau_display_engine { }; struct nouveau_gpio_engine { + void *priv; + int (*init)(struct drm_device *); void (*takedown)(struct drm_device *); int (*get)(struct drm_device *, enum dcb_gpio_tag); int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); - void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); + int (*irq_register)(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); + void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); + bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); }; struct nouveau_pm_voltage_level { @@ -437,6 +478,7 @@ struct nouveau_pm_engine { struct nouveau_pm_level *cur; struct device *hwmon; + struct notifier_block acpi_nb; int (*clock_get)(struct drm_device *, u32 id); void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, @@ -449,6 +491,25 @@ struct nouveau_pm_engine { int (*temp_get)(struct drm_device *); }; +struct nouveau_crypt_engine { + bool registered; + + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); + int (*create_context)(struct nouveau_channel *); + void (*destroy_context)(struct nouveau_channel *); + void (*tlb_flush)(struct drm_device *dev); +}; + +struct nouveau_vram_engine { + int (*init)(struct drm_device *); + int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, + u32 type, struct nouveau_vram **); + void (*put)(struct drm_device *, struct nouveau_vram **); + + bool (*flags_valid)(struct drm_device *, u32 tile_flags); +}; + struct nouveau_engine { struct nouveau_instmem_engine instmem; struct nouveau_mc_engine mc; @@ -459,6 +520,8 @@ struct nouveau_engine { struct nouveau_display_engine display; struct nouveau_gpio_engine gpio; struct nouveau_pm_engine pm; + struct nouveau_crypt_engine crypt; + struct nouveau_vram_engine vram; }; struct nouveau_pll_vals { @@ -577,18 +640,15 @@ struct drm_nouveau_private { bool ramin_available; struct drm_mm ramin_heap; struct list_head gpuobj_list; + struct list_head classes; struct nouveau_bo *vga_ram; + /* interrupt handling */ + void (*irq_handler[32])(struct drm_device *); + bool msi_enabled; struct workqueue_struct *wq; struct work_struct irq_work; - struct work_struct hpd_work; - - struct { - spinlock_t lock; - uint32_t hpd0_bits; - uint32_t hpd1_bits; - } hpd_state; struct list_head vbl_waiting; @@ -605,8 +665,10 @@ struct drm_nouveau_private { struct nouveau_bo *bo; } fence; - int fifo_alloc_count; - struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; + struct { + spinlock_t lock; + struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR]; + } channels; struct nouveau_engine engine; struct nouveau_channel *channel; @@ -632,12 +694,14 @@ struct drm_nouveau_private { uint64_t aper_free; struct nouveau_gpuobj *sg_ctxdma; - struct page *sg_dummy_page; - dma_addr_t sg_dummy_bus; + struct nouveau_vma vma; } gart_info; /* nv10-nv40 tiling regions */ - struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; + struct { + struct 
nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; + spinlock_t lock; + } tile; /* VRAM/fb configuration */ uint64_t vram_size; @@ -650,14 +714,12 @@ struct drm_nouveau_private { uint64_t fb_aper_free; int fb_mtrr; + /* BAR control (NV50-) */ + struct nouveau_vm *bar1_vm; + struct nouveau_vm *bar3_vm; + /* G8x/G9x virtual address space */ - uint64_t vm_gart_base; - uint64_t vm_gart_size; - uint64_t vm_vram_base; - uint64_t vm_vram_size; - uint64_t vm_end; - struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; - int vm_vram_pt_nr; + struct nouveau_vm *chan_vm; struct nvbios vbios; @@ -674,6 +736,7 @@ struct drm_nouveau_private { struct backlight_device *backlight; struct nouveau_channel *evo; + u32 evo_alloc; struct { struct dcb_entry *dcb; u16 script; @@ -719,16 +782,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) return 0; } -#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ - struct drm_nouveau_private *nv = dev->dev_private; \ - if (!nouveau_channel_owner(dev, (cl), (id))) { \ - NV_ERROR(dev, "pid %d doesn't own channel %d\n", \ - DRM_CURRENTPID, (id)); \ - return -EPERM; \ - } \ - (ch) = nv->fifos[(id)]; \ -} while (0) - /* nouveau_drv.c */ extern int nouveau_agpmode; extern int nouveau_duallink; @@ -748,6 +801,7 @@ extern int nouveau_force_post; extern int nouveau_override_conntype; extern char *nouveau_perflvl; extern int nouveau_perflvl_wr; +extern int nouveau_msi; extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); extern int nouveau_pci_resume(struct pci_dev *pdev); @@ -762,8 +816,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data, struct drm_file *); extern int nouveau_ioctl_setparam(struct drm_device *, void *data, struct drm_file *); -extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, - uint32_t reg, uint32_t mask, uint32_t val); +extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val); +extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val); extern bool nouveau_wait_for_idle(struct drm_device *); extern int nouveau_card_init(struct drm_device *); @@ -775,18 +831,15 @@ extern void nouveau_mem_gart_fini(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); extern int nouveau_mem_reset_agp(struct drm_device *); extern void nouveau_mem_close(struct drm_device *); -extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev, - uint32_t addr, - uint32_t size, - uint32_t pitch); -extern void nv10_mem_expire_tiling(struct drm_device *dev, - struct nouveau_tile_reg *tile, - struct nouveau_fence *fence); -extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, - uint32_t size, uint32_t flags, - uint64_t phys); -extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt, - uint32_t size); +extern int nouveau_mem_detect(struct drm_device *); +extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags); +extern struct nouveau_tile_reg *nv10_mem_set_tiling( + struct drm_device *dev, uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); +extern void nv10_mem_put_tile_region(struct drm_device *dev, + struct nouveau_tile_reg *tile, + struct nouveau_fence *fence); +extern const struct ttm_mem_type_manager_func nouveau_vram_manager; /* nouveau_notifier.c */ extern int nouveau_notifier_init_channel(struct nouveau_channel *); @@ -803,21 +856,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void 
*data, extern struct drm_ioctl_desc nouveau_ioctls[]; extern int nouveau_max_ioctl; extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); -extern int nouveau_channel_owner(struct drm_device *, struct drm_file *, - int channel); extern int nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan, struct drm_file *file_priv, uint32_t fb_ctxdma, uint32_t tt_ctxdma); -extern void nouveau_channel_free(struct nouveau_channel *); +extern struct nouveau_channel * +nouveau_channel_get_unlocked(struct nouveau_channel *); +extern struct nouveau_channel * +nouveau_channel_get(struct drm_device *, struct drm_file *, int id); +extern void nouveau_channel_put_unlocked(struct nouveau_channel **); +extern void nouveau_channel_put(struct nouveau_channel **); +extern void nouveau_channel_ref(struct nouveau_channel *chan, + struct nouveau_channel **pchan); +extern void nouveau_channel_idle(struct nouveau_channel *chan); /* nouveau_object.c */ +#define NVOBJ_CLASS(d,c,e) do { \ + int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \ + if (ret) \ + return ret; \ +} while(0) + +#define NVOBJ_MTHD(d,c,m,e) do { \ + int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \ + if (ret) \ + return ret; \ +} while(0) + extern int nouveau_gpuobj_early_init(struct drm_device *); extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); extern int nouveau_gpuobj_suspend(struct drm_device *dev); -extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); extern void nouveau_gpuobj_resume(struct drm_device *dev); +extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); +extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, + int (*exec)(struct nouveau_channel *, + u32 class, u32 mthd, u32 data)); +extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); +extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32); extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, uint32_t vram_h, uint32_t tt_h); extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); @@ -832,21 +908,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst, extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, uint64_t offset, uint64_t size, int access, int target, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, - uint64_t offset, uint64_t size, - int access, struct nouveau_gpuobj **, - uint32_t *o_ret); -extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, - struct nouveau_gpuobj **); -extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class, - struct nouveau_gpuobj **); +extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class); +extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base, + u64 size, int target, int access, u32 type, + u32 comp, struct nouveau_gpuobj **pobj); +extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset, + int class, u64 base, u64 size, int target, + int access, u32 type, u32 comp); extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, struct drm_file *); extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, struct drm_file *); /* nouveau_irq.c */ +extern int nouveau_irq_init(struct drm_device *); +extern void nouveau_irq_fini(struct drm_device *); extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); 
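
The nouveau_irq_register()/nouveau_irq_unregister() declarations here pair with the `void (*irq_handler[32])(struct drm_device *)` table added to drm_nouveau_private above: each engine hooks the PMC interrupt status bit it owns, and the top-level handler dispatches by walking the set bits of the status word. Roughly like the following sketch (not the actual nouveau_irq.c):

#include <linux/bitops.h>
#include <linux/types.h>

#define NR_STATUS_BITS 32

/* One handler slot per status bit, as in dev_priv->irq_handler[32]. */
static void (*handler[NR_STATUS_BITS])(void *dev);

static void irq_register(int bit, void (*fn)(void *))
{
	handler[bit] = fn;
}

static void dispatch(u32 status, void *dev)
{
	while (status) {
		int bit = __ffs(status);	/* lowest set bit */

		if (handler[bit])
			handler[bit](dev);
		status &= ~(1U << bit);
	}
}

This replaces the old scheme where the core handler had to know about every engine's interrupt explicitly.
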
+extern void nouveau_irq_register(struct drm_device *, int status_bit, + void (*)(struct drm_device *)); +extern void nouveau_irq_unregister(struct drm_device *, int status_bit); extern void nouveau_irq_preinstall(struct drm_device *); extern int nouveau_irq_postinstall(struct drm_device *); extern void nouveau_irq_uninstall(struct drm_device *); @@ -854,8 +934,8 @@ extern void nouveau_irq_uninstall(struct drm_device *); /* nouveau_sgdma.c */ extern int nouveau_sgdma_init(struct drm_device *); extern void nouveau_sgdma_takedown(struct drm_device *); -extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, - uint32_t *page); +extern uint32_t nouveau_sgdma_get_physical(struct drm_device *, + uint32_t offset); extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); /* nouveau_debugfs.c */ @@ -966,18 +1046,25 @@ extern void nv04_fb_takedown(struct drm_device *); /* nv10_fb.c */ extern int nv10_fb_init(struct drm_device *); extern void nv10_fb_takedown(struct drm_device *); -extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, + uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); +extern void nv10_fb_set_tile_region(struct drm_device *dev, int i); +extern void nv10_fb_free_tile_region(struct drm_device *dev, int i); /* nv30_fb.c */ extern int nv30_fb_init(struct drm_device *); extern void nv30_fb_takedown(struct drm_device *); +extern void nv30_fb_init_tile_region(struct drm_device *dev, int i, + uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); +extern void nv30_fb_free_tile_region(struct drm_device *dev, int i); /* nv40_fb.c */ extern int nv40_fb_init(struct drm_device *); extern void nv40_fb_takedown(struct drm_device *); -extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv40_fb_set_tile_region(struct drm_device *dev, int i); + /* nv50_fb.c */ extern int nv50_fb_init(struct drm_device *); extern void nv50_fb_takedown(struct drm_device *); @@ -989,6 +1076,7 @@ extern void nvc0_fb_takedown(struct drm_device *); /* nv04_fifo.c */ extern int nv04_fifo_init(struct drm_device *); +extern void nv04_fifo_fini(struct drm_device *); extern void nv04_fifo_disable(struct drm_device *); extern void nv04_fifo_enable(struct drm_device *); extern bool nv04_fifo_reassign(struct drm_device *, bool); @@ -998,19 +1086,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *); extern void nv04_fifo_destroy_context(struct nouveau_channel *); extern int nv04_fifo_load_context(struct nouveau_channel *); extern int nv04_fifo_unload_context(struct drm_device *); +extern void nv04_fifo_isr(struct drm_device *); /* nv10_fifo.c */ extern int nv10_fifo_init(struct drm_device *); extern int nv10_fifo_channel_id(struct drm_device *); extern int nv10_fifo_create_context(struct nouveau_channel *); -extern void nv10_fifo_destroy_context(struct nouveau_channel *); extern int nv10_fifo_load_context(struct nouveau_channel *); extern int nv10_fifo_unload_context(struct drm_device *); /* nv40_fifo.c */ extern int nv40_fifo_init(struct drm_device *); extern int nv40_fifo_create_context(struct nouveau_channel *); -extern void nv40_fifo_destroy_context(struct nouveau_channel *); extern int nv40_fifo_load_context(struct nouveau_channel *); extern int nv40_fifo_unload_context(struct drm_device *); @@ -1038,7 +1125,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *); extern int 
nvc0_fifo_unload_context(struct drm_device *); /* nv04_graph.c */ -extern struct nouveau_pgraph_object_class nv04_graph_grclass[]; extern int nv04_graph_init(struct drm_device *); extern void nv04_graph_takedown(struct drm_device *); extern void nv04_graph_fifo_access(struct drm_device *, bool); @@ -1047,10 +1133,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *); extern void nv04_graph_destroy_context(struct nouveau_channel *); extern int nv04_graph_load_context(struct nouveau_channel *); extern int nv04_graph_unload_context(struct drm_device *); -extern void nv04_graph_context_switch(struct drm_device *); +extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data); +extern struct nouveau_bitfield nv04_graph_nsource[]; /* nv10_graph.c */ -extern struct nouveau_pgraph_object_class nv10_graph_grclass[]; extern int nv10_graph_init(struct drm_device *); extern void nv10_graph_takedown(struct drm_device *); extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); @@ -1058,13 +1145,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *); extern void nv10_graph_destroy_context(struct nouveau_channel *); extern int nv10_graph_load_context(struct nouveau_channel *); extern int nv10_graph_unload_context(struct drm_device *); -extern void nv10_graph_context_switch(struct drm_device *); -extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv10_graph_set_tile_region(struct drm_device *dev, int i); +extern struct nouveau_bitfield nv10_graph_intr[]; +extern struct nouveau_bitfield nv10_graph_nstatus[]; /* nv20_graph.c */ -extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; -extern struct nouveau_pgraph_object_class nv30_graph_grclass[]; extern int nv20_graph_create_context(struct nouveau_channel *); extern void nv20_graph_destroy_context(struct nouveau_channel *); extern int nv20_graph_load_context(struct nouveau_channel *); @@ -1072,11 +1157,9 @@ extern int nv20_graph_unload_context(struct drm_device *); extern int nv20_graph_init(struct drm_device *); extern void nv20_graph_takedown(struct drm_device *); extern int nv30_graph_init(struct drm_device *); -extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv20_graph_set_tile_region(struct drm_device *dev, int i); /* nv40_graph.c */ -extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; extern int nv40_graph_init(struct drm_device *); extern void nv40_graph_takedown(struct drm_device *); extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); @@ -1085,11 +1168,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *); extern int nv40_graph_load_context(struct nouveau_channel *); extern int nv40_graph_unload_context(struct drm_device *); extern void nv40_grctx_init(struct nouveau_grctx *); -extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv40_graph_set_tile_region(struct drm_device *dev, int i); /* nv50_graph.c */ -extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; extern int nv50_graph_init(struct drm_device *); extern void nv50_graph_takedown(struct drm_device *); extern void nv50_graph_fifo_access(struct drm_device *, bool); @@ -1098,7 +1179,6 @@ extern int nv50_graph_create_context(struct nouveau_channel *); extern void nv50_graph_destroy_context(struct nouveau_channel *); extern int nv50_graph_load_context(struct 
nouveau_channel *); extern int nv50_graph_unload_context(struct drm_device *); -extern void nv50_graph_context_switch(struct drm_device *); extern int nv50_grctx_init(struct nouveau_grctx *); extern void nv50_graph_tlb_flush(struct drm_device *dev); extern void nv86_graph_tlb_flush(struct drm_device *dev); @@ -1113,16 +1193,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *); extern int nvc0_graph_load_context(struct nouveau_channel *); extern int nvc0_graph_unload_context(struct drm_device *); +/* nv84_crypt.c */ +extern int nv84_crypt_init(struct drm_device *dev); +extern void nv84_crypt_fini(struct drm_device *dev); +extern int nv84_crypt_create_context(struct nouveau_channel *); +extern void nv84_crypt_destroy_context(struct nouveau_channel *); +extern void nv84_crypt_tlb_flush(struct drm_device *dev); + /* nv04_instmem.c */ extern int nv04_instmem_init(struct drm_device *); extern void nv04_instmem_takedown(struct drm_device *); extern int nv04_instmem_suspend(struct drm_device *); extern void nv04_instmem_resume(struct drm_device *); -extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); -extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); -extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); -extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern void nv04_instmem_put(struct nouveau_gpuobj *); +extern int nv04_instmem_map(struct nouveau_gpuobj *); +extern void nv04_instmem_unmap(struct nouveau_gpuobj *); extern void nv04_instmem_flush(struct drm_device *); /* nv50_instmem.c */ @@ -1130,25 +1216,22 @@ extern int nv50_instmem_init(struct drm_device *); extern void nv50_instmem_takedown(struct drm_device *); extern int nv50_instmem_suspend(struct drm_device *); extern void nv50_instmem_resume(struct drm_device *); -extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); -extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); -extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); -extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern void nv50_instmem_put(struct nouveau_gpuobj *); +extern int nv50_instmem_map(struct nouveau_gpuobj *); +extern void nv50_instmem_unmap(struct nouveau_gpuobj *); extern void nv50_instmem_flush(struct drm_device *); extern void nv84_instmem_flush(struct drm_device *); -extern void nv50_vm_flush(struct drm_device *, int engine); /* nvc0_instmem.c */ extern int nvc0_instmem_init(struct drm_device *); extern void nvc0_instmem_takedown(struct drm_device *); extern int nvc0_instmem_suspend(struct drm_device *); extern void nvc0_instmem_resume(struct drm_device *); -extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); -extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); -extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); -extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern void nvc0_instmem_put(struct nouveau_gpuobj *); +extern int nvc0_instmem_map(struct nouveau_gpuobj *); +extern void nvc0_instmem_unmap(struct nouveau_gpuobj *); extern void 
nvc0_instmem_flush(struct drm_device *); /* nv04_mc.c */ @@ -1219,6 +1302,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); +extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); +extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu); /* nouveau_fence.c */ struct nouveau_fence; @@ -1234,12 +1320,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence, void (*work)(void *priv, bool signalled), void *priv); struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); -extern bool nouveau_fence_signalled(void *obj, void *arg); -extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); + +extern bool __nouveau_fence_signalled(void *obj, void *arg); +extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); +extern int __nouveau_fence_flush(void *obj, void *arg); +extern void __nouveau_fence_unref(void **obj); +extern void *__nouveau_fence_ref(void *obj); + +static inline bool nouveau_fence_signalled(struct nouveau_fence *obj) +{ + return __nouveau_fence_signalled(obj, NULL); +} +static inline int +nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr) +{ + return __nouveau_fence_wait(obj, NULL, lazy, intr); +} extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); -extern int nouveau_fence_flush(void *obj, void *arg); -extern void nouveau_fence_unref(void **obj); -extern void *nouveau_fence_ref(void *obj); +static inline int nouveau_fence_flush(struct nouveau_fence *obj) +{ + return __nouveau_fence_flush(obj, NULL); +} +static inline void nouveau_fence_unref(struct nouveau_fence **obj) +{ + __nouveau_fence_unref((void **)obj); +} +static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj) +{ + return __nouveau_fence_ref(obj); +} /* nouveau_gem.c */ extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, @@ -1259,15 +1368,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, extern int nouveau_gem_ioctl_info(struct drm_device *, void *, struct drm_file *); +/* nouveau_display.c */ +int nouveau_vblank_enable(struct drm_device *dev, int crtc); +void nouveau_vblank_disable(struct drm_device *dev, int crtc); +int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event); +int nouveau_finish_page_flip(struct nouveau_channel *, + struct nouveau_page_flip_state *); + /* nv10_gpio.c */ int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); /* nv50_gpio.c */ int nv50_gpio_init(struct drm_device *dev); +void nv50_gpio_fini(struct drm_device *dev); int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); -void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); +int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); +void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); +bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); /* nv50_calc. 
*/ int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, @@ -1334,7 +1456,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val) } #define nv_wait(dev, reg, mask, val) \ - nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) + nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val)) +#define nv_wait_ne(dev, reg, mask, val) \ + nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val)) /* PRAMIN access */ static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) @@ -1447,6 +1571,23 @@ nv_match_device(struct drm_device *dev, unsigned device, dev->pdev->subsystem_device == sub_device; } +/* memory type/access flags, do not match hardware values */ +#define NV_MEM_ACCESS_RO 1 +#define NV_MEM_ACCESS_WO 2 +#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) +#define NV_MEM_ACCESS_SYS 4 +#define NV_MEM_ACCESS_VM 8 + +#define NV_MEM_TARGET_VRAM 0 +#define NV_MEM_TARGET_PCI 1 +#define NV_MEM_TARGET_PCI_NOSNOOP 2 +#define NV_MEM_TARGET_VM 3 +#define NV_MEM_TARGET_GART 4 + +#define NV_MEM_TYPE_VM 0x7f +#define NV_MEM_COMP_VM 0x03 + +/* NV_SW object class */ #define NV_SW 0x0000506e #define NV_SW_DMA_SEMAPHORE 0x00000060 #define NV_SW_SEMAPHORE_OFFSET 0x00000064 @@ -1457,5 +1598,6 @@ nv_match_device(struct drm_device *dev, unsigned device, #define NV_SW_VBLSEM_OFFSET 0x00000400 #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 #define NV_SW_VBLSEM_RELEASE 0x00000408 +#define NV_SW_PAGE_FLIP 0x00000500 #endif /* __NOUVEAU_DRV_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 02a4d1fd4845..ea861c915149 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -49,6 +49,96 @@ #include "nouveau_fbcon.h" #include "nouveau_dma.h" +static void +nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&dev_priv->channel->mutex)) { + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_fillrect(info, rect); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_fillrect(info, rect); + mutex_unlock(&dev_priv->channel->mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_fillrect(info, rect); +} + +static void +nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&dev_priv->channel->mutex)) { + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_copyarea(info, image); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_copyarea(info, image); + mutex_unlock(&dev_priv->channel->mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_copyarea(info, image); +} + +static void +nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = 
dev->dev_private; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&dev_priv->channel->mutex)) { + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_imageblit(info, image); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_imageblit(info, image); + mutex_unlock(&dev_priv->channel->mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_imageblit(info, image); +} + static int nouveau_fbcon_sync(struct fb_info *info) { @@ -58,12 +148,17 @@ nouveau_fbcon_sync(struct fb_info *info) struct nouveau_channel *chan = dev_priv->channel; int ret, i; - if (!chan || !chan->accel_done || + if (!chan || !chan->accel_done || in_interrupt() || info->state != FBINFO_STATE_RUNNING || info->flags & FBINFO_HWACCEL_DISABLED) return 0; - if (RING_SPACE(chan, 4)) { + if (!mutex_trylock(&chan->mutex)) + return 0; + + ret = RING_SPACE(chan, 4); + if (ret) { + mutex_unlock(&chan->mutex); nouveau_fbcon_gpu_lockup(info); return 0; } @@ -74,6 +169,7 @@ nouveau_fbcon_sync(struct fb_info *info) OUT_RING(chan, 0); nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); FIRE_RING(chan); + mutex_unlock(&chan->mutex); ret = -EBUSY; for (i = 0; i < 100000; i++) { @@ -97,9 +193,9 @@ static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = nouveau_fbcon_fillrect, + .fb_copyarea = nouveau_fbcon_copyarea, + .fb_imageblit = nouveau_fbcon_imageblit, .fb_sync = nouveau_fbcon_sync, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, @@ -108,29 +204,13 @@ static struct fb_ops nouveau_fbcon_ops = { .fb_debug_leave = drm_fb_helper_debug_leave, }; -static struct fb_ops nv04_fbcon_ops = { +static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = nv04_fbcon_fillrect, - .fb_copyarea = nv04_fbcon_copyarea, - .fb_imageblit = nv04_fbcon_imageblit, - .fb_sync = nouveau_fbcon_sync, - .fb_pan_display = drm_fb_helper_pan_display, - .fb_blank = drm_fb_helper_blank, - .fb_setcmap = drm_fb_helper_setcmap, - .fb_debug_enter = drm_fb_helper_debug_enter, - .fb_debug_leave = drm_fb_helper_debug_leave, -}; - -static struct fb_ops nv50_fbcon_ops = { - .owner = THIS_MODULE, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = nv50_fbcon_fillrect, - .fb_copyarea = nv50_fbcon_copyarea, - .fb_imageblit = nv50_fbcon_imageblit, - .fb_sync = nouveau_fbcon_sync, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -257,9 +337,9 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; info->flags |= FBINFO_CAN_FORCE_OUTPUT; - info->fbops = &nouveau_fbcon_ops; - info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - - dev_priv->vm_vram_base; + info->fbops = &nouveau_fbcon_sw_ops; + info->fix.smem_start = dev->mode_config.fb_base + + (nvbo->bo.mem.start << PAGE_SHIFT); info->fix.smem_len = size; info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); @@ -285,19 +365,18 @@ 
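
The fbcon wrappers above take the channel mutex with mutex_trylock() because console output can run in atomic context (hence the in_interrupt() check), where blocking on the mutex or waiting for ring space is not allowed; on any failure they fall back to the software cfb_*() path. The locking shape, reduced to a sketch with stub helpers (hw_draw, sw_draw and fb_op are illustrative, not real API):

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>

struct fb_op { int unused; };			/* illustrative stub */

static int hw_draw(struct fb_op *op)  { return 0; }	/* accel path stub */
static void sw_draw(struct fb_op *op) { }		/* cfb_* fallback stub */

static DEFINE_MUTEX(chan_mutex);

static void draw(struct fb_op *op)
{
	int ret = -ENODEV;

	/* Never block: take the channel opportunistically, otherwise
	 * (atomic context, contention, accel error) draw with the CPU. */
	if (!in_interrupt() && mutex_trylock(&chan_mutex)) {
		ret = hw_draw(op);
		mutex_unlock(&chan_mutex);
	}
	if (ret)
		sw_draw(op);
}

The single nouveau_fbcon_ops table plus a software nouveau_fbcon_sw_ops fallback then replaces the separate nv04/nv50 fb_ops tables, since the card-type branch moved into the wrappers themselves.
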
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; + mutex_unlock(&dev->struct_mutex); + if (dev_priv->channel && !nouveau_nofbaccel) { - switch (dev_priv->card_type) { - case NV_C0: - break; - case NV_50: - nv50_fbcon_accel_init(info); - info->fbops = &nv50_fbcon_ops; - break; - default: - nv04_fbcon_accel_init(info); - info->fbops = &nv04_fbcon_ops; - break; - }; + ret = -ENODEV; + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_accel_init(info); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_accel_init(info); + + if (ret == 0) + info->fbops = &nouveau_fbcon_ops; } nouveau_fbcon_zfill(dev, nfbdev); @@ -308,7 +387,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, nouveau_fb->base.height, nvbo->bo.offset, nvbo); - mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index e7e12684c37e..6b933f2c3a5b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -40,13 +40,13 @@ struct nouveau_fbdev { void nouveau_fbcon_restore(void); -void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); -void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); -void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); +int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv04_fbcon_accel_init(struct fb_info *info); -void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); -void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); -void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); +int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv50_fbcon_accel_init(struct fb_info *info); void nouveau_fbcon_gpu_lockup(struct fb_info *info); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index ab1bbfbf266e..374a9793b85f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -64,6 +64,7 @@ nouveau_fence_del(struct kref *ref) struct nouveau_fence *fence = container_of(ref, struct nouveau_fence, refcount); + nouveau_channel_ref(NULL, &fence->channel); kfree(fence); } @@ -76,14 +77,17 @@ nouveau_fence_update(struct nouveau_channel *chan) spin_lock(&chan->fence.lock); - if (USE_REFCNT(dev)) - sequence = nvchan_rd32(chan, 0x48); - else - sequence = atomic_read(&chan->fence.last_sequence_irq); + /* Fetch the last sequence if the channel is still up and running */ + if (likely(!list_empty(&chan->fence.pending))) { + if (USE_REFCNT(dev)) + sequence = nvchan_rd32(chan, 0x48); + else + sequence = atomic_read(&chan->fence.last_sequence_irq); - if (chan->fence.sequence_ack == sequence) - goto out; - chan->fence.sequence_ack = sequence; + if (chan->fence.sequence_ack == sequence) + goto out; + chan->fence.sequence_ack = sequence; + } list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { sequence = fence->sequence; @@ -113,13 +117,13 @@ 
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, if (!fence) return -ENOMEM; kref_init(&fence->refcount); - fence->channel = chan; + nouveau_channel_ref(chan, &fence->channel); if (emit) ret = nouveau_fence_emit(fence); if (ret) - nouveau_fence_unref((void *)&fence); + nouveau_fence_unref(&fence); *pfence = fence; return ret; } @@ -127,7 +131,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, struct nouveau_channel * nouveau_fence_channel(struct nouveau_fence *fence) { - return fence ? fence->channel : NULL; + return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL; } int @@ -182,7 +186,7 @@ nouveau_fence_work(struct nouveau_fence *fence, } void -nouveau_fence_unref(void **sync_obj) +__nouveau_fence_unref(void **sync_obj) { struct nouveau_fence *fence = nouveau_fence(*sync_obj); @@ -192,7 +196,7 @@ nouveau_fence_unref(void **sync_obj) } void * -nouveau_fence_ref(void *sync_obj) +__nouveau_fence_ref(void *sync_obj) { struct nouveau_fence *fence = nouveau_fence(sync_obj); @@ -201,7 +205,7 @@ nouveau_fence_ref(void *sync_obj) } bool -nouveau_fence_signalled(void *sync_obj, void *sync_arg) +__nouveau_fence_signalled(void *sync_obj, void *sync_arg) { struct nouveau_fence *fence = nouveau_fence(sync_obj); struct nouveau_channel *chan = fence->channel; @@ -214,13 +218,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg) } int -nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) +__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) { unsigned long timeout = jiffies + (3 * DRM_HZ); + unsigned long sleep_time = jiffies + 1; int ret = 0; while (1) { - if (nouveau_fence_signalled(sync_obj, sync_arg)) + if (__nouveau_fence_signalled(sync_obj, sync_arg)) break; if (time_after_eq(jiffies, timeout)) { @@ -230,7 +235,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); - if (lazy) + if (lazy && time_after_eq(jiffies, sleep_time)) schedule_timeout(1); if (intr && signal_pending(current)) { @@ -368,7 +373,7 @@ emit_semaphore(struct nouveau_channel *chan, int method, kref_get(&sema->ref); nouveau_fence_work(fence, semaphore_work, sema); - nouveau_fence_unref((void *)&fence); + nouveau_fence_unref(&fence); return 0; } @@ -380,33 +385,49 @@ nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan = nouveau_fence_channel(fence); struct drm_device *dev = wchan->dev; struct nouveau_semaphore *sema; - int ret; + int ret = 0; - if (likely(!fence || chan == wchan || - nouveau_fence_signalled(fence, NULL))) - return 0; + if (likely(!chan || chan == wchan || + nouveau_fence_signalled(fence))) + goto out; sema = alloc_semaphore(dev); if (!sema) { /* Early card or broken userspace, fall back to * software sync. 
*/
-		return nouveau_fence_wait(fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out;
+	}
+
+	/* try to take chan's mutex, if we can't take it right away
+	 * we have to fall back to software sync to prevent locking
+	 * order issues
+	 */
+	if (!mutex_trylock(&chan->mutex)) {
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out_unref;
 	}

 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
 	if (ret)
-		goto out;
+		goto out_unlock;

 	/* Signal the semaphore from chan */
 	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+	mutex_unlock(&chan->mutex);
+out_unref:
 	kref_put(&sema->ref, free_semaphore);
+out:
+	if (chan)
+		nouveau_channel_put_unlocked(&chan);
 	return ret;
 }

 int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
 	return 0;
 }
@@ -420,12 +441,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	int ret;

 	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(chan, NvSw, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
 	if (ret)
 		return ret;
@@ -437,13 +453,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	/* Create a DMA object for the shared cross-channel sync area. */
 	if (USE_SEMA(dev)) {
-		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     mem->start << PAGE_SHIFT,
-					     mem->size << PAGE_SHIFT,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &obj);
+					     mem->size, NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
 		if (ret)
 			return ret;
@@ -473,6 +488,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 {
 	struct nouveau_fence *tmp, *fence;

+	spin_lock(&chan->fence.lock);
+
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -482,6 +499,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
+
+	spin_unlock(&chan->fence.lock);
 }

 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c2..506c508b7eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 		return;
 	nvbo->gem = NULL;

-	if (unlikely(nvbo->cpu_filp))
-		ttm_bo_synccpu_write_release(bo);
-
 	if (unlikely(nvbo->pin_refcnt)) {
 		nvbo->pin_refcnt = 1;
 		nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
 	return 0;
 }

-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->card_type >= NV_50) {
-		switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
-		case 0x0000:
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7000:
-		case 0x7400:
-		case 0x7a00:
-		case 0xe000:
-			return true;
-		}
-	} else {
-		if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-			return true;
-	}
-
-	NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-	return false;
-}
-
 int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if 
(unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; - if (req->channel_hint) { - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint, - file_priv, chan); - } - if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) flags |= TTM_PL_FLAG_VRAM; if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART) @@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU) flags |= TTM_PL_FLAG_SYSTEM; - if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags)) + if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { + NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); return -EINVAL; + } + + if (req->channel_hint) { + chan = nouveau_channel_get(dev, file_priv, req->channel_hint); + if (IS_ERR(chan)) + return PTR_ERR(chan); + } ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, req->info.tile_mode, req->info.tile_flags, false, (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), &nvbo); + if (chan) + nouveau_channel_put(&chan); if (ret) return ret; @@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) list_for_each_safe(entry, tmp, list) { nvbo = list_entry(entry, struct nouveau_bo, entry); - if (likely(fence)) { - struct nouveau_fence *prev_fence; - - spin_lock(&nvbo->bo.lock); - prev_fence = nvbo->bo.sync_obj; - nvbo->bo.sync_obj = nouveau_fence_ref(fence); - spin_unlock(&nvbo->bo.lock); - nouveau_fence_unref((void *)&prev_fence); - } + + nouveau_bo_fence(nvbo, fence); if (unlikely(nvbo->validate_mapped)) { ttm_bo_kunmap(&nvbo->kmap); @@ -299,14 +268,15 @@ retry: return -EINVAL; } - ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence); + ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); if (ret) { validate_fini(op, NULL); - if (ret == -EAGAIN) - ret = ttm_bo_wait_unreserved(&nvbo->bo, false); + if (unlikely(ret == -EAGAIN)) + ret = ttm_bo_wait_unreserved(&nvbo->bo, true); drm_gem_object_unreference_unlocked(gem); - if (ret) { - NV_ERROR(dev, "fail reserve\n"); + if (unlikely(ret)) { + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "fail reserve\n"); return ret; } goto retry; @@ -331,25 +301,6 @@ retry: validate_fini(op, NULL); return -EINVAL; } - - if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) { - validate_fini(op, NULL); - - if (nvbo->cpu_filp == file_priv) { - NV_ERROR(dev, "bo %p mapped by process trying " - "to validate it!\n", nvbo); - return -EINVAL; - } - - mutex_unlock(&drm_global_mutex); - ret = ttm_bo_wait_cpu(&nvbo->bo, false); - mutex_lock(&drm_global_mutex); - if (ret) { - NV_ERROR(dev, "fail wait_cpu\n"); - return ret; - } - goto retry; - } } return 0; @@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, } nvbo->channel = (b->read_domains & (1 << 31)) ? 
NULL : chan; - ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, - false, false, false); + ret = nouveau_bo_validate(nvbo, true, false, false); nvbo->channel = NULL; if (unlikely(ret)) { - NV_ERROR(dev, "fail ttm_validate\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "fail ttm_validate\n"); return ret; } @@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); if (unlikely(ret)) { - NV_ERROR(dev, "validate_init\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate_init\n"); return ret; } ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); if (unlikely(ret < 0)) { - NV_ERROR(dev, "validate vram_list\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate vram_list\n"); validate_fini(op, NULL); return ret; } @@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); if (unlikely(ret < 0)) { - NV_ERROR(dev, "validate gart_list\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate gart_list\n"); validate_fini(op, NULL); return ret; } @@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, &op->both_list, pbbo, user_buffers); if (unlikely(ret < 0)) { - NV_ERROR(dev, "validate both_list\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate both_list\n"); validate_fini(op, NULL); return ret; } @@ -557,9 +512,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, data |= r->vor; } - spin_lock(&nvbo->bo.lock); + spin_lock(&nvbo->bo.bdev->fence_lock); ret = ttm_bo_wait(&nvbo->bo, false, false, false); - spin_unlock(&nvbo->bo.lock); + spin_unlock(&nvbo->bo.bdev->fence_lock); if (ret) { NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); break; @@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, struct nouveau_fence *fence = NULL; int i, j, ret = 0, do_reloc = 0; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, req->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); req->vram_available = dev_priv->fb_aper_free; req->gart_available = dev_priv->gart_info.aper_free; @@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", req->nr_push, NOUVEAU_GEM_MAX_PUSH); + nouveau_channel_put(&chan); return -EINVAL; } if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); + nouveau_channel_put(&chan); return -EINVAL; } if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); + nouveau_channel_put(&chan); return -EINVAL; } push = u_memcpya(req->push, req->nr_push, sizeof(*push)); - if (IS_ERR(push)) + if (IS_ERR(push)) { + nouveau_channel_put(&chan); return PTR_ERR(push); + } bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); if (IS_ERR(bo)) { kfree(push); + nouveau_channel_put(&chan); return PTR_ERR(bo); } @@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, req->nr_buffers, &op, &do_reloc); if (ret) { - NV_ERROR(dev, "validate: %d\n", ret); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate: %d\n", ret); goto out; } 
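
The nouveau_gem_ioctl_pushbuf() hunks above replace the old NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro with an explicit nouveau_channel_get()/nouveau_channel_put() pair, so every early return from the ioctl now has to drop the channel reference taken at entry. The stand-alone C sketch below shows just that discipline in isolation; it is an illustration, not part of the patch, and all demo_* names are hypothetical stand-ins rather than nouveau APIs.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct nouveau_channel. */
struct demo_channel {
	int refcount;
	int id;
};

/* Stand-in for nouveau_channel_get(): returns a referenced channel. */
static struct demo_channel *
demo_channel_get(int id)
{
	struct demo_channel *chan = malloc(sizeof(*chan));

	if (!chan)
		return NULL;
	chan->refcount = 1;
	chan->id = id;
	return chan;
}

/* Stand-in for nouveau_channel_put(): drops the reference and clears
 * the caller's pointer so a stale pointer cannot be reused afterwards.
 */
static void
demo_channel_put(struct demo_channel **pchan)
{
	struct demo_channel *chan = *pchan;

	*pchan = NULL;
	if (chan && --chan->refcount == 0)
		free(chan);
}

/* Mirrors the ioctl's shape: the reference is taken once at entry and
 * released on every exit path, exactly as the hunks do around each
 * request-validation check.
 */
static int
demo_pushbuf(int chan_id, int nr_push, int max_push)
{
	struct demo_channel *chan = demo_channel_get(chan_id);

	if (!chan)
		return -1;	/* nothing acquired, nothing to release */

	if (nr_push > max_push) {
		demo_channel_put(&chan);	/* paired put on the error path */
		return -1;			/* would be -EINVAL in the driver */
	}

	printf("channel %d: submitting %d pushes\n", chan->id, nr_push);
	demo_channel_put(&chan);		/* paired put on the success path */
	return 0;
}

int
main(void)
{
	demo_pushbuf(3, 2, 512);	/* success path */
	demo_pushbuf(3, 1024, 512);	/* error path still drops the ref */
	return 0;
}

Passing a struct demo_channel ** to the put helper mirrors nouveau_channel_put(&chan) in the diff: nulling the caller's pointer turns an accidental double put or use-after-put into an immediate, visible failure instead of a silent refcount imbalance.
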
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, out: validate_fini(&op, fence); - nouveau_fence_unref((void**)&fence); + nouveau_fence_unref(&fence); kfree(bo); kfree(push); @@ -750,6 +714,7 @@ out_next: req->suffix1 = 0x00000000; } + nouveau_channel_put(&chan); return ret; } @@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, return -ENOENT; nvbo = nouveau_gem_object(gem); - if (nvbo->cpu_filp) { - if (nvbo->cpu_filp == file_priv) - goto out; - - ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait); - if (ret) - goto out; - } - - if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { - spin_lock(&nvbo->bo.lock); - ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); - spin_unlock(&nvbo->bo.lock); - } else { - ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); - if (ret == 0) - nvbo->cpu_filp = file_priv; - } - -out: + spin_lock(&nvbo->bo.bdev->fence_lock); + ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait); + spin_unlock(&nvbo->bo.bdev->fence_lock); drm_gem_object_unreference_unlocked(gem); return ret; } @@ -809,26 +757,7 @@ int nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_gem_cpu_prep *req = data; - struct drm_gem_object *gem; - struct nouveau_bo *nvbo; - int ret = -EINVAL; - - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) - return -ENOENT; - nvbo = nouveau_gem_object(gem); - - if (nvbo->cpu_filp != file_priv) - goto out; - nvbo->cpu_filp = NULL; - - ttm_bo_synccpu_write_release(&nvbo->bo); - ret = 0; - -out: - drm_gem_object_unreference_unlocked(gem); - return ret; + return 0; } int diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index b9672a05c411..053edf9d2f67 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head, NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); - if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC) + if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC) NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); else NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); @@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head, if (dev_priv->card_type == NV_10) { /* Not waiting for vertical retrace before modifying CRE_53/CRE_54 causes lockups. */ - nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); - nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); + nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); + nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); } wr_cio_state(dev, head, regp, NV_CIO_CRE_53); @@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head, NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); - /* Setting 1 on this value gives you interrupts for every vblank period. */ - NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0); + /* Enable vblank interrupts. */ + NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, + (dev->vblank_enabled[head] ? 
1 : 0)); NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK); } diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 7bfd9e6c9d67..2ba7265bc967 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -36,18 +36,7 @@ #include "nouveau_drv.h" #include "nouveau_reg.h" #include "nouveau_ramht.h" -#include <linux/ratelimit.h> - -/* needed for hotplug irq */ -#include "nouveau_connector.h" -#include "nv50_display.h" - -static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); - -static int nouveau_ratelimit(void) -{ - return __ratelimit(&nouveau_ratelimit_state); -} +#include "nouveau_util.h" void nouveau_irq_preinstall(struct drm_device *dev) @@ -57,19 +46,19 @@ nouveau_irq_preinstall(struct drm_device *dev) /* Master disable */ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); - if (dev_priv->card_type >= NV_50) { - INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); - INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); - spin_lock_init(&dev_priv->hpd_state.lock); - INIT_LIST_HEAD(&dev_priv->vbl_waiting); - } + INIT_LIST_HEAD(&dev_priv->vbl_waiting); } int nouveau_irq_postinstall(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + /* Master enable */ nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); + if (dev_priv->msi_enabled) + nv_wr08(dev, 0x00088068, 0xff); + return 0; } @@ -80,1178 +69,83 @@ nouveau_irq_uninstall(struct drm_device *dev) nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); } -static int -nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data) -{ - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct nouveau_pgraph_object_method *grm; - struct nouveau_pgraph_object_class *grc; - - grc = dev_priv->engine.graph.grclass; - while (grc->id) { - if (grc->id == class) - break; - grc++; - } - - if (grc->id != class || !grc->methods) - return -ENOENT; - - grm = grc->methods; - while (grm->id) { - if (grm->id == mthd) - return grm->exec(chan, class, mthd, data); - grm++; - } - - return -ENOENT; -} - -static bool -nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) -{ - struct drm_device *dev = chan->dev; - const int subc = (addr >> 13) & 0x7; - const int mthd = addr & 0x1ffc; - - if (mthd == 0x0000) { - struct nouveau_gpuobj *gpuobj; - - gpuobj = nouveau_ramht_find(chan, data); - if (!gpuobj) - return false; - - if (gpuobj->engine != NVOBJ_ENGINE_SW) - return false; - - chan->sw_subchannel[subc] = gpuobj->class; - nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, - NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); - return true; - } - - /* hw object */ - if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4))) - return false; - - if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data)) - return false; - - return true; -} - -static void -nouveau_fifo_irq_handler(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - uint32_t status, reassign; - int cnt = 0; - - reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; - while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { - struct nouveau_channel *chan = NULL; - uint32_t chid, get; - - nv_wr32(dev, NV03_PFIFO_CACHES, 0); - - chid = engine->fifo.channel_id(dev); - if (chid >= 0 && chid < engine->fifo.channels) - chan = dev_priv->fifos[chid]; - get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); - - if (status & 
NV_PFIFO_INTR_CACHE_ERROR) { - uint32_t mthd, data; - int ptr; - - /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before - * wrapping on my G80 chips, but CACHE1 isn't big - * enough for this much data.. Tests show that it - * wraps around to the start at GET=0x800.. No clue - * as to why.. - */ - ptr = (get & 0x7ff) >> 2; - - if (dev_priv->card_type < NV_40) { - mthd = nv_rd32(dev, - NV04_PFIFO_CACHE1_METHOD(ptr)); - data = nv_rd32(dev, - NV04_PFIFO_CACHE1_DATA(ptr)); - } else { - mthd = nv_rd32(dev, - NV40_PFIFO_CACHE1_METHOD(ptr)); - data = nv_rd32(dev, - NV40_PFIFO_CACHE1_DATA(ptr)); - } - - if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) { - NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " - "Mthd 0x%04x Data 0x%08x\n", - chid, (mthd >> 13) & 7, mthd & 0x1ffc, - data); - } - - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); - nv_wr32(dev, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_CACHE_ERROR); - - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, - nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, - nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); - nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); - - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, - nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); - - status &= ~NV_PFIFO_INTR_CACHE_ERROR; - } - - if (status & NV_PFIFO_INTR_DMA_PUSHER) { - u32 dma_get = nv_rd32(dev, 0x003244); - u32 dma_put = nv_rd32(dev, 0x003240); - u32 push = nv_rd32(dev, 0x003220); - u32 state = nv_rd32(dev, 0x003228); - - if (dev_priv->card_type == NV_50) { - u32 ho_get = nv_rd32(dev, 0x003328); - u32 ho_put = nv_rd32(dev, 0x003320); - u32 ib_get = nv_rd32(dev, 0x003334); - u32 ib_put = nv_rd32(dev, 0x003330); - - if (nouveau_ratelimit()) - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " - "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " - "State 0x%08x Push 0x%08x\n", - chid, ho_get, dma_get, ho_put, - dma_put, ib_get, ib_put, state, - push); - - /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ - nv_wr32(dev, 0x003364, 0x00000000); - if (dma_get != dma_put || ho_get != ho_put) { - nv_wr32(dev, 0x003244, dma_put); - nv_wr32(dev, 0x003328, ho_put); - } else - if (ib_get != ib_put) { - nv_wr32(dev, 0x003334, ib_put); - } - } else { - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " - "Put 0x%08x State 0x%08x Push 0x%08x\n", - chid, dma_get, dma_put, state, push); - - if (dma_get != dma_put) - nv_wr32(dev, 0x003244, dma_put); - } - - nv_wr32(dev, 0x003228, 0x00000000); - nv_wr32(dev, 0x003220, 0x00000001); - nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); - status &= ~NV_PFIFO_INTR_DMA_PUSHER; - } - - if (status & NV_PFIFO_INTR_SEMAPHORE) { - uint32_t sem; - - status &= ~NV_PFIFO_INTR_SEMAPHORE; - nv_wr32(dev, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_SEMAPHORE); - - sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); - nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); - - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); - } - - if (dev_priv->card_type == NV_50) { - if (status & 0x00000010) { - nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); - status &= ~0x00000010; - nv_wr32(dev, 0x002100, 0x00000010); - } - } - - if (status) { - if (nouveau_ratelimit()) - NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", - status, chid); - nv_wr32(dev, NV03_PFIFO_INTR_0, status); - status = 0; - } - - nv_wr32(dev, NV03_PFIFO_CACHES, reassign); - } - - if (status) { - NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); - nv_wr32(dev, 0x2140, 0); - 
nv_wr32(dev, 0x140, 0); - } - - nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); -} - -struct nouveau_bitfield_names { - uint32_t mask; - const char *name; -}; - -static struct nouveau_bitfield_names nstatus_names[] = -{ - { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, - { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, - { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, - { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } -}; - -static struct nouveau_bitfield_names nstatus_names_nv10[] = -{ - { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, - { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, - { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, - { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } -}; - -static struct nouveau_bitfield_names nsource_names[] = -{ - { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, - { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, - { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, - { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, - { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, - { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, - { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, - { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, - { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, - { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, - { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, - { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, - { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, - { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, - { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, - { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, - { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, - { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, - { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, -}; - -static void -nouveau_print_bitfield_names_(uint32_t value, - const struct nouveau_bitfield_names *namelist, - const int namelist_len) -{ - /* - * Caller must have already printed the KERN_* log level for us. - * Also the caller is responsible for adding the newline. - */ - int i; - for (i = 0; i < namelist_len; ++i) { - uint32_t mask = namelist[i].mask; - if (value & mask) { - printk(" %s", namelist[i].name); - value &= ~mask; - } - } - if (value) - printk(" (unknown bits 0x%08x)", value); -} -#define nouveau_print_bitfield_names(val, namelist) \ - nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) - -struct nouveau_enum_names { - uint32_t value; - const char *name; -}; - -static void -nouveau_print_enum_names_(uint32_t value, - const struct nouveau_enum_names *namelist, - const int namelist_len) -{ - /* - * Caller must have already printed the KERN_* log level for us. - * Also the caller is responsible for adding the newline. 
- */ - int i; - for (i = 0; i < namelist_len; ++i) { - if (value == namelist[i].value) { - printk("%s", namelist[i].name); - return; - } - } - printk("unknown value 0x%08x", value); -} -#define nouveau_print_enum_names(val, namelist) \ - nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) - -static int -nouveau_graph_chid_from_grctx(struct drm_device *dev) +irqreturn_t +nouveau_irq_handler(DRM_IRQ_ARGS) { + struct drm_device *dev = (struct drm_device *)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t inst; + unsigned long flags; + u32 stat; int i; - if (dev_priv->card_type < NV_40) - return dev_priv->engine.fifo.channels; - else - if (dev_priv->card_type < NV_50) { - inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4; - - for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; - - if (!chan || !chan->ramin_grctx) - continue; - - if (inst == chan->ramin_grctx->pinst) - break; - } - } else { - inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12; - - for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; - - if (!chan || !chan->ramin) - continue; - - if (inst == chan->ramin->vinst) - break; - } - } - - - return i; -} - -static int -nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - int channel; - - if (dev_priv->card_type < NV_10) - channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; - else - if (dev_priv->card_type < NV_40) - channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; - else - channel = nouveau_graph_chid_from_grctx(dev); - - if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { - NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel); - return -EINVAL; - } - - *channel_ret = channel; - return 0; -} - -struct nouveau_pgraph_trap { - int channel; - int class; - int subc, mthd, size; - uint32_t data, data2; - uint32_t nsource, nstatus; -}; - -static void -nouveau_graph_trap_info(struct drm_device *dev, - struct nouveau_pgraph_trap *trap) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t address; - - trap->nsource = trap->nstatus = 0; - if (dev_priv->card_type < NV_50) { - trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); - trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); - } - - if (nouveau_graph_trapped_channel(dev, &trap->channel)) - trap->channel = -1; - address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); - - trap->mthd = address & 0x1FFC; - trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); - if (dev_priv->card_type < NV_10) { - trap->subc = (address >> 13) & 0x7; - } else { - trap->subc = (address >> 16) & 0x7; - trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH); - } - - if (dev_priv->card_type < NV_10) - trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF; - else if (dev_priv->card_type < NV_40) - trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF; - else if (dev_priv->card_type < NV_50) - trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF; - else - trap->class = nv_rd32(dev, 0x400814); -} - -static void -nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, - struct nouveau_pgraph_trap *trap) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t nsource = trap->nsource, nstatus = trap->nstatus; - - if (dev_priv->card_type < NV_50) { - NV_INFO(dev, "%s - nSource:", 
id); - nouveau_print_bitfield_names(nsource, nsource_names); - printk(", nStatus:"); - if (dev_priv->card_type < NV_10) - nouveau_print_bitfield_names(nstatus, nstatus_names); - else - nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); - printk("\n"); - } - - NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " - "Data 0x%08x:0x%08x\n", - id, trap->channel, trap->subc, - trap->class, trap->mthd, - trap->data2, trap->data); -} - -static int -nouveau_pgraph_intr_swmthd(struct drm_device *dev, - struct nouveau_pgraph_trap *trap) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - if (trap->channel < 0 || - trap->channel >= dev_priv->engine.fifo.channels || - !dev_priv->fifos[trap->channel]) - return -ENODEV; - - return nouveau_call_method(dev_priv->fifos[trap->channel], - trap->class, trap->mthd, trap->data); -} - -static inline void -nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) -{ - struct nouveau_pgraph_trap trap; - int unhandled = 0; + stat = nv_rd32(dev, NV03_PMC_INTR_0); + if (!stat) + return IRQ_NONE; - nouveau_graph_trap_info(dev, &trap); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + for (i = 0; i < 32 && stat; i++) { + if (!(stat & (1 << i)) || !dev_priv->irq_handler[i]) + continue; - if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { - if (nouveau_pgraph_intr_swmthd(dev, &trap)) - unhandled = 1; - } else { - unhandled = 1; + dev_priv->irq_handler[i](dev); + stat &= ~(1 << i); } - if (unhandled) - nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); -} - - -static inline void -nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) -{ - struct nouveau_pgraph_trap trap; - int unhandled = 0; - - nouveau_graph_trap_info(dev, &trap); - trap.nsource = nsource; - - if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { - if (nouveau_pgraph_intr_swmthd(dev, &trap)) - unhandled = 1; - } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { - uint32_t v = nv_rd32(dev, 0x402000); - nv_wr32(dev, 0x402000, v); - - /* dump the error anyway for now: it's useful for - Gallium development */ - unhandled = 1; - } else { - unhandled = 1; - } + if (dev_priv->msi_enabled) + nv_wr08(dev, 0x00088068, 0xff); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - if (unhandled && nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); + if (stat && nouveau_ratelimit()) + NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat); + return IRQ_HANDLED; } -static inline void -nouveau_pgraph_intr_context_switch(struct drm_device *dev) +int +nouveau_irq_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - uint32_t chid; - - chid = engine->fifo.channel_id(dev); - NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid); - - switch (dev_priv->card_type) { - case NV_04: - nv04_graph_context_switch(dev); - break; - case NV_10: - nv10_graph_context_switch(dev); - break; - default: - NV_ERROR(dev, "Context switch not implemented\n"); - break; - } -} - -static void -nouveau_pgraph_irq_handler(struct drm_device *dev) -{ - uint32_t status; - - while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { - uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); - - if (status & NV_PGRAPH_INTR_NOTIFY) { - nouveau_pgraph_intr_notify(dev, nsource); - - status &= ~NV_PGRAPH_INTR_NOTIFY; - nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); - } - - if (status & NV_PGRAPH_INTR_ERROR) { - nouveau_pgraph_intr_error(dev, 
nsource); + int ret; - status &= ~NV_PGRAPH_INTR_ERROR; - nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); + if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) { + ret = pci_enable_msi(dev->pdev); + if (ret == 0) { + NV_INFO(dev, "enabled MSI\n"); + dev_priv->msi_enabled = true; } - - if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { - status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; - nv_wr32(dev, NV03_PGRAPH_INTR, - NV_PGRAPH_INTR_CONTEXT_SWITCH); - - nouveau_pgraph_intr_context_switch(dev); - } - - if (status) { - NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); - nv_wr32(dev, NV03_PGRAPH_INTR, status); - } - - if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0) - nv_wr32(dev, NV04_PGRAPH_FIFO, 1); } - nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); -} - -static struct nouveau_enum_names nv50_mp_exec_error_names[] = -{ - { 3, "STACK_UNDERFLOW" }, - { 4, "QUADON_ACTIVE" }, - { 8, "TIMEOUT" }, - { 0x10, "INVALID_OPCODE" }, - { 0x40, "BREAKPOINT" }, -}; - -static void -nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t units = nv_rd32(dev, 0x1540); - uint32_t addr, mp10, status, pc, oplow, ophigh; - int i; - int mps = 0; - for (i = 0; i < 4; i++) { - if (!(units & 1 << (i+24))) - continue; - if (dev_priv->chipset < 0xa0) - addr = 0x408200 + (tpid << 12) + (i << 7); - else - addr = 0x408100 + (tpid << 11) + (i << 7); - mp10 = nv_rd32(dev, addr + 0x10); - status = nv_rd32(dev, addr + 0x14); - if (!status) - continue; - if (display) { - nv_rd32(dev, addr + 0x20); - pc = nv_rd32(dev, addr + 0x24); - oplow = nv_rd32(dev, addr + 0x70); - ophigh= nv_rd32(dev, addr + 0x74); - NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " - "TP %d MP %d: ", tpid, i); - nouveau_print_enum_names(status, - nv50_mp_exec_error_names); - printk(" at %06x warp %d, opcode %08x %08x\n", - pc&0xffffff, pc >> 24, - oplow, ophigh); - } - nv_wr32(dev, addr + 0x10, mp10); - nv_wr32(dev, addr + 0x14, 0); - mps++; - } - if (!mps && display) - NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " - "No MPs claiming errors?\n", tpid); + return drm_irq_install(dev); } -static void -nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, - uint32_t ustatus_new, int display, const char *name) +void +nouveau_irq_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int tps = 0; - uint32_t units = nv_rd32(dev, 0x1540); - int i, r; - uint32_t ustatus_addr, ustatus; - for (i = 0; i < 16; i++) { - if (!(units & (1 << i))) - continue; - if (dev_priv->chipset < 0xa0) - ustatus_addr = ustatus_old + (i << 12); - else - ustatus_addr = ustatus_new + (i << 11); - ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; - if (!ustatus) - continue; - tps++; - switch (type) { - case 6: /* texture error... 
unknown for now */ - nv50_fb_vm_trap(dev, display, name); - if (display) { - NV_ERROR(dev, "magic set %d:\n", i); - for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, - nv_rd32(dev, r)); - } - break; - case 7: /* MP error */ - if (ustatus & 0x00010000) { - nv50_pgraph_mp_trap(dev, i, display); - ustatus &= ~0x00010000; - } - break; - case 8: /* TPDMA error */ - { - uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); - uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); - uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); - uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); - uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); - uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); - uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); - nv50_fb_vm_trap(dev, display, name); - /* 2d engine destination */ - if (ustatus & 0x00000010) { - if (display) { - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", - i, e14, e10); - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", - i, e0c, e18, e1c, e20, e24); - } - ustatus &= ~0x00000010; - } - /* Render target */ - if (ustatus & 0x00000040) { - if (display) { - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", - i, e14, e10); - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", - i, e0c, e18, e1c, e20, e24); - } - ustatus &= ~0x00000040; - } - /* CUDA memory: l[], g[] or stack. */ - if (ustatus & 0x00000080) { - if (display) { - if (e18 & 0x80000000) { - /* g[] read fault? */ - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", - i, e14, e10 | ((e18 >> 24) & 0x1f)); - e18 &= ~0x1f000000; - } else if (e18 & 0xc) { - /* g[] write fault? */ - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", - i, e14, e10 | ((e18 >> 7) & 0x1f)); - e18 &= ~0x00000f80; - } else { - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", - i, e14, e10); - } - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", - i, e0c, e18, e1c, e20, e24); - } - ustatus &= ~0x00000080; - } - } - break; - } - if (ustatus) { - if (display) - NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); - } - nv_wr32(dev, ustatus_addr, 0xc0000000); - } - - if (!tps && display) - NV_INFO(dev, "%s - No TPs claiming errors?\n", name); -} - -static void -nv50_pgraph_trap_handler(struct drm_device *dev) -{ - struct nouveau_pgraph_trap trap; - uint32_t status = nv_rd32(dev, 0x400108); - uint32_t ustatus; - int display = nouveau_ratelimit(); - - - if (!status && display) { - nouveau_graph_trap_info(dev, &trap); - nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); - NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); - } - - /* DISPATCH: Relays commands to other units and handles NOTIFY, - * COND, QUERY. If you get a trap from it, the command is still stuck - * in DISPATCH and you need to do something about it. */ - if (status & 0x001) { - ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); - } - - /* Known to be triggered by screwed up NOTIFY and COND... 
*/ - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); - nv_wr32(dev, 0x400500, 0); - if (nv_rd32(dev, 0x400808) & 0x80000000) { - if (display) { - if (nouveau_graph_trapped_channel(dev, &trap.channel)) - trap.channel = -1; - trap.class = nv_rd32(dev, 0x400814); - trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; - trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; - trap.data = nv_rd32(dev, 0x40080c); - trap.data2 = nv_rd32(dev, 0x400810); - nouveau_graph_dump_trap_info(dev, - "PGRAPH_TRAP_DISPATCH_FAULT", &trap); - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); - } - nv_wr32(dev, 0x400808, 0); - } else if (display) { - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); - } - nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); - nv_wr32(dev, 0x400848, 0); - ustatus &= ~0x00000001; - } - if (ustatus & 0x00000002) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); - nv_wr32(dev, 0x400500, 0); - if (nv_rd32(dev, 0x40084c) & 0x80000000) { - if (display) { - if (nouveau_graph_trapped_channel(dev, &trap.channel)) - trap.channel = -1; - trap.class = nv_rd32(dev, 0x400814); - trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; - trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; - trap.data = nv_rd32(dev, 0x40085c); - trap.data2 = 0; - nouveau_graph_dump_trap_info(dev, - "PGRAPH_TRAP_DISPATCH_QUERY", &trap); - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); - } - nv_wr32(dev, 0x40084c, 0); - } else if (display) { - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); - } - ustatus &= ~0x00000002; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus); - nv_wr32(dev, 0x400804, 0xc0000000); - nv_wr32(dev, 0x400108, 0x001); - status &= ~0x001; - } - - /* TRAPs other than dispatch use the "normal" trap regs. */ - if (status && display) { - nouveau_graph_trap_info(dev, &trap); - nouveau_graph_dump_trap_info(dev, - "PGRAPH_TRAP", &trap); - } - - /* M2MF: Memory to memory copy engine. */ - if (status & 0x002) { - ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); - ustatus &= ~0x00000001; - } - if (ustatus & 0x00000002) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); - ustatus &= ~0x00000002; - } - if (ustatus & 0x00000004) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); - ustatus &= ~0x00000004; - } - NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", - nv_rd32(dev, 0x406804), - nv_rd32(dev, 0x406808), - nv_rd32(dev, 0x40680c), - nv_rd32(dev, 0x406810)); - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); - /* No sane way found yet -- just reset the bugger. */ - nv_wr32(dev, 0x400040, 2); - nv_wr32(dev, 0x400040, 0); - nv_wr32(dev, 0x406800, 0xc0000000); - nv_wr32(dev, 0x400108, 0x002); - status &= ~0x002; - } - - /* VFETCH: Fetches data from vertex buffers. 
*/ - if (status & 0x004) { - ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); - NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", - nv_rd32(dev, 0x400c00), - nv_rd32(dev, 0x400c08), - nv_rd32(dev, 0x400c0c), - nv_rd32(dev, 0x400c10)); - ustatus &= ~0x00000001; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); - nv_wr32(dev, 0x400c04, 0xc0000000); - nv_wr32(dev, 0x400108, 0x004); - status &= ~0x004; - } - - /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ - if (status & 0x008) { - ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); - NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", - nv_rd32(dev, 0x401804), - nv_rd32(dev, 0x401808), - nv_rd32(dev, 0x40180c), - nv_rd32(dev, 0x401810)); - ustatus &= ~0x00000001; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); - /* No sane way found yet -- just reset the bugger. */ - nv_wr32(dev, 0x400040, 0x80); - nv_wr32(dev, 0x400040, 0); - nv_wr32(dev, 0x401800, 0xc0000000); - nv_wr32(dev, 0x400108, 0x008); - status &= ~0x008; - } - - /* CCACHE: Handles code and c[] caches and fills them. */ - if (status & 0x010) { - ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); - NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", - nv_rd32(dev, 0x405800), - nv_rd32(dev, 0x405804), - nv_rd32(dev, 0x405808), - nv_rd32(dev, 0x40580c), - nv_rd32(dev, 0x405810), - nv_rd32(dev, 0x405814), - nv_rd32(dev, 0x40581c)); - ustatus &= ~0x00000001; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); - nv_wr32(dev, 0x405018, 0xc0000000); - nv_wr32(dev, 0x400108, 0x010); - status &= ~0x010; - } - - /* Unknown, not seen yet... 0x402000 is the only trap status reg - * remaining, so try to handle it anyway. Perhaps related to that - * unknown DMA slot on tesla? */ - if (status & 0x20) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); - ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; - if (display) - NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); - nv_wr32(dev, 0x402000, 0xc0000000); - /* no status modifiction on purpose */ - } - - /* TEXTURE: CUDA texturing units */ - if (status & 0x040) { - nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, - "PGRAPH_TRAP_TEXTURE"); - nv_wr32(dev, 0x400108, 0x040); - status &= ~0x040; - } - - /* MP: CUDA execution engines. */ - if (status & 0x080) { - nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, - "PGRAPH_TRAP_MP"); - nv_wr32(dev, 0x400108, 0x080); - status &= ~0x080; - } - - /* TPDMA: Handles TP-initiated uncached memory accesses: - * l[], g[], stack, 2d surfaces, render targets. 
*/ - if (status & 0x100) { - nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, - "PGRAPH_TRAP_TPDMA"); - nv_wr32(dev, 0x400108, 0x100); - status &= ~0x100; - } - - if (status) { - if (display) - NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", - status); - nv_wr32(dev, 0x400108, status); - } -} - -/* There must be a *lot* of these. Will take some time to gather them up. */ -static struct nouveau_enum_names nv50_data_error_names[] = -{ - { 4, "INVALID_VALUE" }, - { 5, "INVALID_ENUM" }, - { 8, "INVALID_OBJECT" }, - { 0xc, "INVALID_BITFIELD" }, - { 0x28, "MP_NO_REG_SPACE" }, - { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, -}; - -static void -nv50_pgraph_irq_handler(struct drm_device *dev) -{ - struct nouveau_pgraph_trap trap; - int unhandled = 0; - uint32_t status; - - while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { - /* NOTIFY: You've set a NOTIFY an a command and it's done. */ - if (status & 0x00000001) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_NOTIFY", &trap); - status &= ~0x00000001; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); - } - - /* COMPUTE_QUERY: Purpose and exact cause unknown, happens - * when you write 0x200 to 0x50c0 method 0x31c. */ - if (status & 0x00000002) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_COMPUTE_QUERY", &trap); - status &= ~0x00000002; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); - } - - /* Unknown, never seen: 0x4 */ - - /* ILLEGAL_MTHD: You used a wrong method for this class. */ - if (status & 0x00000010) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_pgraph_intr_swmthd(dev, &trap)) - unhandled = 1; - if (unhandled && nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_ILLEGAL_MTHD", &trap); - status &= ~0x00000010; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); - } - - /* ILLEGAL_CLASS: You used a wrong class. */ - if (status & 0x00000020) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_ILLEGAL_CLASS", &trap); - status &= ~0x00000020; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); - } - - /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. 
*/ - if (status & 0x00000040) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_DOUBLE_NOTIFY", &trap); - status &= ~0x00000040; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); - } - - /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ - if (status & 0x00001000) { - nv_wr32(dev, 0x400500, 0x00000000); - nv_wr32(dev, NV03_PGRAPH_INTR, - NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, - NV40_PGRAPH_INTR_EN) & - ~NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, 0x400500, 0x00010001); - - nv50_graph_context_switch(dev); - - status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; - } - - /* BUFFER_NOTIFY: Your m2mf transfer finished */ - if (status & 0x00010000) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_BUFFER_NOTIFY", &trap); - status &= ~0x00010000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); - } - - /* DATA_ERROR: Invalid value for this method, or invalid - * state in current PGRAPH context for this operation */ - if (status & 0x00100000) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) { - nouveau_graph_dump_trap_info(dev, - "PGRAPH_DATA_ERROR", &trap); - NV_INFO (dev, "PGRAPH_DATA_ERROR - "); - nouveau_print_enum_names(nv_rd32(dev, 0x400110), - nv50_data_error_names); - printk("\n"); - } - status &= ~0x00100000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); - } - /* TRAP: Something bad happened in the middle of command - * execution. Has a billion types, subtypes, and even - * subsubtypes. */ - if (status & 0x00200000) { - nv50_pgraph_trap_handler(dev); - status &= ~0x00200000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); - } - - /* Unknown, never seen: 0x00400000 */ - - /* SINGLE_STEP: Happens on every method if you turned on - * single stepping in 40008c */ - if (status & 0x01000000) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_SINGLE_STEP", &trap); - status &= ~0x01000000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); - } - - /* 0x02000000 happens when you pause a ctxprog... - * but the only way this can happen that I know is by - * poking the relevant MMIO register, and we don't - * do that. 
*/ - - if (status) { - NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", - status); - nv_wr32(dev, NV03_PGRAPH_INTR, status); - } - - { - const int isb = (1 << 16) | (1 << 0); - - if ((nv_rd32(dev, 0x400500) & isb) != isb) - nv_wr32(dev, 0x400500, - nv_rd32(dev, 0x400500) | isb); - } - } - - nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); - if (nv_rd32(dev, 0x400824) & (1 << 31)) - nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); + drm_irq_uninstall(dev); + if (dev_priv->msi_enabled) + pci_disable_msi(dev->pdev); } -static void -nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) +void +nouveau_irq_register(struct drm_device *dev, int status_bit, + void (*handler)(struct drm_device *)) { - if (crtc & 1) - nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; - if (crtc & 2) - nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + dev_priv->irq_handler[status_bit] = handler; + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); } -irqreturn_t -nouveau_irq_handler(DRM_IRQ_ARGS) +void +nouveau_irq_unregister(struct drm_device *dev, int status_bit) { - struct drm_device *dev = (struct drm_device *)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t status; unsigned long flags; - status = nv_rd32(dev, NV03_PMC_INTR_0); - if (!status) - return IRQ_NONE; - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - - if (status & NV_PMC_INTR_0_PFIFO_PENDING) { - nouveau_fifo_irq_handler(dev); - status &= ~NV_PMC_INTR_0_PFIFO_PENDING; - } - - if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { - if (dev_priv->card_type >= NV_50) - nv50_pgraph_irq_handler(dev); - else - nouveau_pgraph_irq_handler(dev); - - status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; - } - - if (status & NV_PMC_INTR_0_CRTCn_PENDING) { - nouveau_crtc_irq_handler(dev, (status>>24)&3); - status &= ~NV_PMC_INTR_0_CRTCn_PENDING; - } - - if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING | - NV_PMC_INTR_0_NV50_I2C_PENDING)) { - nv50_display_irq_handler(dev); - status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING | - NV_PMC_INTR_0_NV50_I2C_PENDING); - } - - if (status) - NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); - + dev_priv->irq_handler[status_bit] = NULL; spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - - return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index fe4a30dc4b42..224181193a1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -36,183 +36,112 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" +#include "nouveau_mm.h" +#include "nouveau_vm.h" /* * NV10-NV40 tiling helpers */ static void -nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv10_mem_update_tile_region(struct drm_device *dev, + struct nouveau_tile_reg *tile, uint32_t addr, + uint32_t size, uint32_t pitch, uint32_t flags) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_tile_reg *tile = &dev_priv->tile[i]; + int i = tile - dev_priv->tile.reg; + unsigned long save; - tile->addr = addr; - tile->size = size; - tile->used = !!pitch; - nouveau_fence_unref((void **)&tile->fence); + 
nouveau_fence_unref(&tile->fence); + if (tile->pitch) + pfb->free_tile_region(dev, i); + + if (pitch) + pfb->init_tile_region(dev, i, addr, size, pitch, flags); + + spin_lock_irqsave(&dev_priv->context_switch_lock, save); pfifo->reassign(dev, false); pfifo->cache_pull(dev, false); nouveau_wait_for_idle(dev); - pgraph->set_region_tiling(dev, i, addr, size, pitch); - pfb->set_region_tiling(dev, i, addr, size, pitch); + pfb->set_tile_region(dev, i); + pgraph->set_tile_region(dev, i); pfifo->cache_pull(dev, true); pfifo->reassign(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, save); } -struct nouveau_tile_reg * -nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, - uint32_t pitch) +static struct nouveau_tile_reg * +nv10_mem_get_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; - struct nouveau_tile_reg *found = NULL; - unsigned long i, flags; - - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - - for (i = 0; i < pfb->num_tiles; i++) { - struct nouveau_tile_reg *tile = &dev_priv->tile[i]; - - if (tile->used) - /* Tile region in use. */ - continue; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - if (tile->fence && - !nouveau_fence_signalled(tile->fence, NULL)) - /* Pending tile region. */ - continue; - - if (max(tile->addr, addr) < - min(tile->addr + tile->size, addr + size)) - /* Kill an intersecting tile region. */ - nv10_mem_set_region_tiling(dev, i, 0, 0, 0); - - if (pitch && !found) { - /* Free tile region. */ - nv10_mem_set_region_tiling(dev, i, addr, size, pitch); - found = tile; - } - } + spin_lock(&dev_priv->tile.lock); - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + if (!tile->used && + (!tile->fence || nouveau_fence_signalled(tile->fence))) + tile->used = true; + else + tile = NULL; - return found; + spin_unlock(&dev_priv->tile.lock); + return tile; } void -nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, - struct nouveau_fence *fence) -{ - if (fence) { - /* Mark it as pending. */ - tile->fence = fence; - nouveau_fence_ref(fence); - } - - tile->used = false; -} - -/* - * NV50 VM helpers - */ -int -nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, - uint32_t flags, uint64_t phys) +nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, + struct nouveau_fence *fence) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *pgt; - unsigned block; - int i; - virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; - size = (size >> 16) << 1; - - phys |= ((uint64_t)flags << 32); - phys |= 1; - if (dev_priv->vram_sys_base) { - phys += dev_priv->vram_sys_base; - phys |= 0x30; - } - - while (size) { - unsigned offset_h = upper_32_bits(phys); - unsigned offset_l = lower_32_bits(phys); - unsigned pte, end; - - for (i = 7; i >= 0; i--) { - block = 1 << (i + 1); - if (size >= block && !(virt & (block - 1))) - break; + if (tile) { + spin_lock(&dev_priv->tile.lock); + if (fence) { + /* Mark it as pending. 
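 * A fence reference keeps the region out of circulation:
 * nv10_mem_get_tile_region() refuses to hand the slot out
 * again until nouveau_fence_signalled() reports that the GPU
 * work which last used it has completed.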
*/ + tile->fence = fence; + nouveau_fence_ref(fence); } - offset_l |= (i << 7); - - phys += block << 15; - size -= block; - - while (block) { - pgt = dev_priv->vm_vram_pt[virt >> 14]; - pte = virt & 0x3ffe; - - end = pte + block; - if (end > 16384) - end = 16384; - block -= (end - pte); - virt += (end - pte); - - while (pte < end) { - nv_wo32(pgt, (pte * 4) + 0, offset_l); - nv_wo32(pgt, (pte * 4) + 4, offset_h); - pte += 2; - } - } - } - dev_priv->engine.instmem.flush(dev); - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); - nv50_vm_flush(dev, 6); - return 0; + tile->used = false; + spin_unlock(&dev_priv->tile.lock); + } } -void -nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) +struct nouveau_tile_reg * +nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *pgt; - unsigned pages, pte, end; - - virt -= dev_priv->vm_vram_base; - pages = (size >> 16) << 1; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct nouveau_tile_reg *tile, *found = NULL; + int i; - while (pages) { - pgt = dev_priv->vm_vram_pt[virt >> 29]; - pte = (virt & 0x1ffe0000ULL) >> 15; + for (i = 0; i < pfb->num_tiles; i++) { + tile = nv10_mem_get_tile_region(dev, i); - end = pte + pages; - if (end > 16384) - end = 16384; - pages -= (end - pte); - virt += (end - pte) << 15; + if (pitch && !found) { + found = tile; + continue; - while (pte < end) { - nv_wo32(pgt, (pte * 4), 0); - pte++; + } else if (tile && tile->pitch) { + /* Kill an unused tile region. */ + nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0); } + + nv10_mem_put_tile_region(dev, tile, NULL); } - dev_priv->engine.instmem.flush(dev); - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); - nv50_vm_flush(dev, 6); + if (found) + nv10_mem_update_tile_region(dev, found, addr, size, + pitch, flags); + return found; } /* @@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev) return 0; } -static void -nv50_vram_preinit(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - int i, parts, colbits, rowbitsa, rowbitsb, banks; - u64 rowsize, predicted; - u32 r0, r4, rt, ru; - - r0 = nv_rd32(dev, 0x100200); - r4 = nv_rd32(dev, 0x100204); - rt = nv_rd32(dev, 0x100250); - ru = nv_rd32(dev, 0x001540); - NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); - - for (i = 0, parts = 0; i < 8; i++) { - if (ru & (0x00010000 << i)) - parts++; - } - - colbits = (r4 & 0x0000f000) >> 12; - rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; - rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; - banks = ((r4 & 0x01000000) ? 8 : 4); - - rowsize = parts * banks * (1 << colbits) * 8; - predicted = rowsize << rowbitsa; - if (r0 & 0x00000004) - predicted += rowsize << rowbitsb; - - if (predicted != dev_priv->vram_size) { - NV_WARN(dev, "memory controller reports %dMiB VRAM\n", - (u32)(dev_priv->vram_size >> 20)); - NV_WARN(dev, "we calculated %dMiB VRAM\n", - (u32)(predicted >> 20)); - } - - dev_priv->vram_rblock_size = rowsize >> 12; - if (rt & 1) - dev_priv->vram_rblock_size *= 3; - - NV_DEBUG(dev, "rblock %lld bytes\n", - (u64)dev_priv->vram_rblock_size << 12); -} - -static void -nvaa_vram_preinit(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - /* To our knowledge, there's no large scale reordering of pages - * that occurs on IGP chipsets. 
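 * so a reordering block of a single 4KiB page suffices there.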
- */ - dev_priv->vram_rblock_size = 1; -} - -static int +int nouveau_mem_detect(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -381,40 +255,25 @@ nouveau_mem_detect(struct drm_device *dev) if (dev_priv->card_type < NV_50) { dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; - } else - if (dev_priv->card_type < NV_C0) { - dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); - dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; - dev_priv->vram_size &= 0xffffffff00ll; - - switch (dev_priv->chipset) { - case 0xaa: - case 0xac: - case 0xaf: - dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); - dev_priv->vram_sys_base <<= 12; - nvaa_vram_preinit(dev); - break; - default: - nv50_vram_preinit(dev); - break; - } } else { dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; dev_priv->vram_size *= nv_rd32(dev, 0x121c74); } - NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); - if (dev_priv->vram_sys_base) { - NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", - dev_priv->vram_sys_base); - } - if (dev_priv->vram_size) return 0; return -ENOMEM; } +bool +nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) +{ + if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) + return true; + + return false; +} + #if __OS_HAS_AGP static unsigned long get_agp_mode(struct drm_device *dev, unsigned long mode) @@ -547,10 +406,6 @@ nouveau_mem_vram_init(struct drm_device *dev) if (ret) return ret; - ret = nouveau_mem_detect(dev); - if (ret) - return ret; - dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); ret = nouveau_ttm_global_init(dev_priv); @@ -566,13 +421,6 @@ nouveau_mem_vram_init(struct drm_device *dev) return ret; } - dev_priv->fb_available_size = dev_priv->vram_size; - dev_priv->fb_mappable_pages = dev_priv->fb_available_size; - if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) - dev_priv->fb_mappable_pages = - pci_resource_len(dev->pdev, 1); - dev_priv->fb_mappable_pages >>= PAGE_SHIFT; - /* reserve space at end of VRAM for PRAMIN */ if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) @@ -583,6 +431,22 @@ nouveau_mem_vram_init(struct drm_device *dev) else dev_priv->ramin_rsvd_vram = (512 * 1024); + ret = dev_priv->engine.vram.init(dev); + if (ret) + return ret; + + NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); + if (dev_priv->vram_sys_base) { + NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", + dev_priv->vram_sys_base); + } + + dev_priv->fb_available_size = dev_priv->vram_size; + dev_priv->fb_mappable_pages = dev_priv->fb_available_size; + if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) + dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1); + dev_priv->fb_mappable_pages >>= PAGE_SHIFT; + dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_aper_free = dev_priv->fb_available_size; @@ -799,3 +663,114 @@ nouveau_mem_timing_fini(struct drm_device *dev) kfree(mem->timing); } + +static int +nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct nouveau_mm *mm; + u32 b_size; + int ret; + + p_size = (p_size << PAGE_SHIFT) >> 12; + b_size = dev_priv->vram_rblock_size >> 12; + + ret = nouveau_mm_init(&mm, 0, p_size, b_size); + if (ret) + return ret; + + man->priv = mm; + return 0; +} + +static int 
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) +{ + struct nouveau_mm *mm = man->priv; + int ret; + + ret = nouveau_mm_fini(&mm); + if (ret) + return ret; + + man->priv = NULL; + return 0; +} + +static void +nouveau_vram_manager_del(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; + struct drm_device *dev = dev_priv->dev; + + vram->put(dev, (struct nouveau_vram **)&mem->mm_node); +} + +static int +nouveau_vram_manager_new(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; + struct drm_device *dev = dev_priv->dev; + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_vram *node; + u32 size_nc = 0; + int ret; + + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) + size_nc = 1 << nvbo->vma.node->type; + + ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, + mem->page_alignment << PAGE_SHIFT, size_nc, + (nvbo->tile_flags >> 8) & 0xff, &node); + if (ret) + return ret; + + mem->mm_node = node; + mem->start = node->offset >> PAGE_SHIFT; + return 0; +} + +void +nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) +{ + struct nouveau_mm *mm = man->priv; + struct nouveau_mm_node *r; + u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; + int i; + + mutex_lock(&mm->mutex); + list_for_each_entry(r, &mm->nodes, nl_entry) { + printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", + prefix, r->free ? "free" : "used", r->type, + ((u64)r->offset << 12), + (((u64)r->offset + r->length) << 12)); + total += r->length; + ttotal[r->type] += r->length; + if (r->free) + tfree[r->type] += r->length; + else + tused[r->type] += r->length; + } + mutex_unlock(&mm->mutex); + + printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); + for (i = 0; i < 3; i++) { + printk(KERN_DEBUG "%s type %d: 0x%010llx, " + "used 0x%010llx, free 0x%010llx\n", prefix, + i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12); + } +} + +const struct ttm_mem_type_manager_func nouveau_vram_manager = { + nouveau_vram_manager_init, + nouveau_vram_manager_fini, + nouveau_vram_manager_new, + nouveau_vram_manager_del, + nouveau_vram_manager_debug +}; diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c new file mode 100644 index 000000000000..cdbb11eb701b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c @@ -0,0 +1,271 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_mm.h" + +static inline void +region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a) +{ + list_del(&a->nl_entry); + list_del(&a->fl_entry); + kfree(a); +} + +static struct nouveau_mm_node * +region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) +{ + struct nouveau_mm_node *b; + + if (a->length == size) + return a; + + b = kmalloc(sizeof(*b), GFP_KERNEL); + if (unlikely(b == NULL)) + return NULL; + + b->offset = a->offset; + b->length = size; + b->free = a->free; + b->type = a->type; + a->offset += size; + a->length -= size; + list_add_tail(&b->nl_entry, &a->nl_entry); + if (b->free) + list_add_tail(&b->fl_entry, &a->fl_entry); + return b; +} + +static struct nouveau_mm_node * +nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) +{ + struct nouveau_mm_node *prev, *next; + + /* try to merge with free adjacent entries of same type */ + prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.prev != &rmm->nodes) { + if (prev->free && prev->type == this->type) { + prev->length += this->length; + region_put(rmm, this); + this = prev; + } + } + + next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.next != &rmm->nodes) { + if (next->free && next->type == this->type) { + next->offset = this->offset; + next->length += this->length; + region_put(rmm, this); + this = next; + } + } + + return this; +} + +void +nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) +{ + u32 block_s, block_l; + + this->free = true; + list_add(&this->fl_entry, &rmm->free); + this = nouveau_mm_merge(rmm, this); + + /* any entirely free blocks now? 
we'll want to remove typing
+	 * on them now so they can be used for any memory allocation
+	 */
+	block_s = roundup(this->offset, rmm->block_size);
+	if (block_s + rmm->block_size > this->offset + this->length)
+		return;
+
+	/* split off any still-typed region at the start */
+	if (block_s != this->offset) {
+		if (!region_split(rmm, this, block_s - this->offset))
+			return;
+	}
+
+	/* split off the soon-to-be-untyped block(s) */
+	block_l = rounddown(this->length, rmm->block_size);
+	if (block_l != this->length) {
+		this = region_split(rmm, this, block_l);
+		if (!this)
+			return;
+	}
+
+	/* mark as having no type, and retry merge with any adjacent
+	 * untyped blocks
+	 */
+	this->type = 0;
+	nouveau_mm_merge(rmm, this);
+}
+
+int
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+	       u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *this, *tmp, *next;
+	u32 splitoff, avail, alloc;
+
+	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
+		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+		if (this->nl_entry.next == &rmm->nodes)
+			next = NULL;
+
+		/* skip wrongly typed blocks */
+		if (this->type && this->type != type)
+			continue;
+
+		/* account for alignment */
+		splitoff = this->offset & (align - 1);
+		if (splitoff)
+			splitoff = align - splitoff;
+
+		if (this->length <= splitoff)
+			continue;
+
+		/* determine total memory available from this, and
+		 * the next block (if appropriate)
+		 */
+		avail = this->length;
+		if (next && next->free && (!next->type || next->type == type))
+			avail += next->length;
+
+		avail -= splitoff;
+
+		/* determine allocation size */
+		if (size_nc) {
+			alloc = min(avail, size);
+			alloc = rounddown(alloc, size_nc);
+			if (alloc == 0)
+				continue;
+		} else {
+			alloc = size;
+			if (avail < alloc)
+				continue;
+		}
+
+		/* untyped block, split off a chunk that's a multiple
+		 * of block_size and type it
+		 */
+		if (!this->type) {
+			u32 block = roundup(alloc + splitoff, rmm->block_size);
+			if (this->length < block)
+				continue;
+
+			this = region_split(rmm, this, block);
+			if (!this)
+				return -ENOMEM;
+
+			this->type = type;
+		}
+
+		/* stealing memory from adjacent block */
+		if (alloc > this->length) {
+			u32 amount = alloc - (this->length - splitoff);
+
+			if (!next->type) {
+				amount = roundup(amount, rmm->block_size);
+
+				next = region_split(rmm, next, amount);
+				if (!next)
+					return -ENOMEM;
+
+				next->type = type;
+			}
+
+			this->length += amount;
+			next->offset += amount;
+			next->length -= amount;
+			if (!next->length) {
+				list_del(&next->nl_entry);
+				list_del(&next->fl_entry);
+				kfree(next);
+			}
+		}
+
+		if (splitoff) {
+			if (!region_split(rmm, this, splitoff))
+				return -ENOMEM;
+		}
+
+		this = region_split(rmm, this, alloc);
+		if (this == NULL)
+			return -ENOMEM;
+
+		this->free = false;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+{
+	struct nouveau_mm *rmm;
+	struct nouveau_mm_node *heap;
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return -ENOMEM;
+	heap->free = true;
+	heap->offset = roundup(offset, block);
+	heap->length = rounddown(offset + length, block) - heap->offset;
+
+	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+	if (!rmm) {
+		kfree(heap);
+		return -ENOMEM;
+	}
+	rmm->block_size = block;
+	mutex_init(&rmm->mutex);
+	INIT_LIST_HEAD(&rmm->nodes);
+	INIT_LIST_HEAD(&rmm->free);
+	list_add(&heap->nl_entry, &rmm->nodes);
+	list_add(&heap->fl_entry, &rmm->free);
+
+	*prmm =
rmm; + return 0; +} + +int +nouveau_mm_fini(struct nouveau_mm **prmm) +{ + struct nouveau_mm *rmm = *prmm; + struct nouveau_mm_node *heap = + list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); + + if (!list_is_singular(&rmm->nodes)) + return -EBUSY; + + kfree(heap); + kfree(rmm); + *prmm = NULL; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h new file mode 100644 index 000000000000..250e642de0a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h @@ -0,0 +1,62 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_REGION_H__ +#define __NOUVEAU_REGION_H__ + +struct nouveau_mm_node { + struct list_head nl_entry; + struct list_head fl_entry; + struct list_head rl_entry; + + bool free; + int type; + + u32 offset; + u32 length; +}; + +struct nouveau_mm { + struct list_head nodes; + struct list_head free; + + struct mutex mutex; + + u32 block_size; +}; + +int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block); +int nouveau_mm_fini(struct nouveau_mm **); +int nouveau_mm_pre(struct nouveau_mm *); +int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, + u32 align, struct nouveau_mm_node **); +void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); + +int nv50_vram_init(struct drm_device *); +int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, + u32 memtype, struct nouveau_vram **); +void nv50_vram_del(struct drm_device *, struct nouveau_vram **); +bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 2cc59f8c658b..a050b7b69782 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, int size, uint32_t *b_offset) { struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *nobj = NULL; struct drm_mm_node *mem; uint32_t offset; @@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, return -ENOMEM; } - offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; - if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { - target = NV_DMA_TARGET_VIDMEM; - } else - if 
(chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) { - if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && - dev_priv->card_type < NV_50) { - ret = nouveau_sgdma_get_page(dev, offset, &offset); - if (ret) - return ret; - target = NV_DMA_TARGET_PCI; - } else { - target = NV_DMA_TARGET_AGP; - if (dev_priv->card_type >= NV_50) - offset += dev_priv->vm_gart_base; - } - } else { - NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", - chan->notifier_bo->bo.mem.mem_type); - return -EINVAL; - } + if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) + target = NV_MEM_TARGET_VRAM; + else + target = NV_MEM_TARGET_GART; + offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; offset += mem->start; ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, - mem->size, NV_DMA_ACCESS_RW, target, + mem->size, NV_MEM_ACCESS_RW, target, &nobj); if (ret) { drm_mm_put_block(mem); @@ -185,11 +168,11 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, na->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); - if (ret) - return ret; - - return 0; + nouveau_channel_put(&chan); + return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index dd572adca02a..55c9fdcfa67f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -35,6 +35,102 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nouveau_ramht.h" +#include "nouveau_vm.h" + +struct nouveau_gpuobj_method { + struct list_head head; + u32 mthd; + int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data); +}; + +struct nouveau_gpuobj_class { + struct list_head head; + struct list_head methods; + u32 id; + u32 engine; +}; + +int +nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_class *oc; + + oc = kzalloc(sizeof(*oc), GFP_KERNEL); + if (!oc) + return -ENOMEM; + + INIT_LIST_HEAD(&oc->methods); + oc->id = class; + oc->engine = engine; + list_add(&oc->head, &dev_priv->classes); + return 0; +} + +int +nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd, + int (*exec)(struct nouveau_channel *, u32, u32, u32)) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_method *om; + struct nouveau_gpuobj_class *oc; + + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id == class) + goto found; + } + + return -EINVAL; + +found: + om = kzalloc(sizeof(*om), GFP_KERNEL); + if (!om) + return -ENOMEM; + + om->mthd = mthd; + om->exec = exec; + list_add(&om->head, &oc->methods); + return 0; +} + +int +nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_gpuobj_method *om; + struct nouveau_gpuobj_class *oc; + + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id != class) + continue; + + list_for_each_entry(om, &oc->methods, head) { + if (om->mthd == mthd) + return om->exec(chan, class, mthd, data); + } + } + + return -ENOENT; +} + +int +nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, + u32 class, u32 mthd, u32 data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel 
*chan = NULL; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (chid > 0 && chid < dev_priv->engine.fifo.channels) + chan = dev_priv->channels.ptr[chid]; + if (chan) + ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ret; +} /* NVidia uses context objects to drive drawing operations. @@ -73,17 +169,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, struct nouveau_gpuobj **gpuobj_ret) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; struct nouveau_gpuobj *gpuobj; struct drm_mm_node *ramin = NULL; - int ret; + int ret, i; NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", chan ? chan->id : -1, size, align, flags); - if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) - return -EINVAL; - gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) return -ENOMEM; @@ -98,88 +191,41 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, spin_unlock(&dev_priv->ramin_lock); if (chan) { - NV_DEBUG(dev, "channel heap\n"); - ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); if (ramin) ramin = drm_mm_get_block(ramin, size, align); - if (!ramin) { nouveau_gpuobj_ref(NULL, &gpuobj); return -ENOMEM; } - } else { - NV_DEBUG(dev, "global heap\n"); - - /* allocate backing pages, sets vinst */ - ret = engine->instmem.populate(dev, gpuobj, &size); - if (ret) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return ret; - } - - /* try and get aperture space */ - do { - if (drm_mm_pre_get(&dev_priv->ramin_heap)) - return -ENOMEM; - spin_lock(&dev_priv->ramin_lock); - ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, - align, 0); - if (ramin == NULL) { - spin_unlock(&dev_priv->ramin_lock); - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - - ramin = drm_mm_get_block_atomic(ramin, size, align); - spin_unlock(&dev_priv->ramin_lock); - } while (ramin == NULL); - - /* on nv50 it's ok to fail, we have a fallback path */ - if (!ramin && dev_priv->card_type < NV_50) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - } + gpuobj->pinst = chan->ramin->pinst; + if (gpuobj->pinst != ~0) + gpuobj->pinst += ramin->start; - /* if we got a chunk of the aperture, map pages into it */ - gpuobj->im_pramin = ramin; - if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { - ret = engine->instmem.bind(dev, gpuobj); + gpuobj->cinst = ramin->start; + gpuobj->vinst = ramin->start + chan->ramin->vinst; + gpuobj->node = ramin; + } else { + ret = instmem->get(gpuobj, size, align); if (ret) { nouveau_gpuobj_ref(NULL, &gpuobj); return ret; } - } - /* calculate the various different addresses for the object */ - if (chan) { - gpuobj->pinst = chan->ramin->pinst; - if (gpuobj->pinst != ~0) - gpuobj->pinst += gpuobj->im_pramin->start; - - if (dev_priv->card_type < NV_50) { - gpuobj->cinst = gpuobj->pinst; - } else { - gpuobj->cinst = gpuobj->im_pramin->start; - gpuobj->vinst = gpuobj->im_pramin->start + - chan->ramin->vinst; - } - } else { - if (gpuobj->im_pramin) - gpuobj->pinst = gpuobj->im_pramin->start; - else + ret = -ENOSYS; + if (!(flags & NVOBJ_FLAG_DONT_MAP)) + ret = instmem->map(gpuobj); + if (ret) gpuobj->pinst = ~0; - gpuobj->cinst = 0xdeadbeef; + + gpuobj->cinst = NVOBJ_CINST_GLOBAL; } if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - int i; - for (i = 0; i < gpuobj->size; i += 4) 
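			/* NVOBJ_FLAG_ZERO_ALLOC: scrub the object's
			 * instance memory before first use */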
nv_wo32(gpuobj, i, 0); - engine->instmem.flush(dev); + instmem->flush(dev); } @@ -195,6 +241,7 @@ nouveau_gpuobj_init(struct drm_device *dev) NV_DEBUG(dev, "\n"); INIT_LIST_HEAD(&dev_priv->gpuobj_list); + INIT_LIST_HEAD(&dev_priv->classes); spin_lock_init(&dev_priv->ramin_lock); dev_priv->ramin_base = ~0; @@ -205,9 +252,20 @@ void nouveau_gpuobj_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_method *om, *tm; + struct nouveau_gpuobj_class *oc, *tc; NV_DEBUG(dev, "\n"); + list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) { + list_for_each_entry_safe(om, tm, &oc->methods, head) { + list_del(&om->head); + kfree(om); + } + list_del(&oc->head); + kfree(oc); + } + BUG_ON(!list_empty(&dev_priv->gpuobj_list)); } @@ -219,26 +277,34 @@ nouveau_gpuobj_del(struct kref *ref) container_of(ref, struct nouveau_gpuobj, refcount); struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; int i; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); - if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { + if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { for (i = 0; i < gpuobj->size; i += 4) nv_wo32(gpuobj, i, 0); - engine->instmem.flush(dev); + instmem->flush(dev); } if (gpuobj->dtor) gpuobj->dtor(dev, gpuobj); - if (gpuobj->im_backing) - engine->instmem.clear(dev, gpuobj); + if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) { + if (gpuobj->node) { + instmem->unmap(gpuobj); + instmem->put(gpuobj); + } + } else { + if (gpuobj->node) { + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(gpuobj->node); + spin_unlock(&dev_priv->ramin_lock); + } + } spin_lock(&dev_priv->ramin_lock); - if (gpuobj->im_pramin) - drm_mm_put_block(gpuobj->im_pramin); list_del(&gpuobj->list); spin_unlock(&dev_priv->ramin_lock); @@ -278,7 +344,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, kref_init(&gpuobj->refcount); gpuobj->size = size; gpuobj->pinst = pinst; - gpuobj->cinst = 0xdeadbeef; + gpuobj->cinst = NVOBJ_CINST_GLOBAL; gpuobj->vinst = vinst; if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { @@ -335,113 +401,150 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) The method below creates a DMA object in instance RAM and returns a handle to it that can be used to set up context objects. 
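 *
 *   The NV50 variant below emits a six-word descriptor: word 0 packs
 *   (comp << 29) | (type << 22) | class together with the access and
 *   target bits, words 1-3 hold the limit/base pair (the limit being
 *   base + size - 1), and words 4-5 are written as zero.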
*/ -int -nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, - uint64_t offset, uint64_t size, int access, - int target, struct nouveau_gpuobj **gpuobj) + +void +nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, + u64 base, u64 size, int target, int access, + u32 type, u32 comp) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - int ret; + struct drm_nouveau_private *dev_priv = obj->dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + u32 flags0; - NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n", - chan->id, class, offset, size); - NV_DEBUG(dev, "access=%d target=%d\n", access, target); + flags0 = (comp << 29) | (type << 22) | class; + flags0 |= 0x00100000; + + switch (access) { + case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; + case NV_MEM_ACCESS_RW: + case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break; + default: + break; + } switch (target) { - case NV_DMA_TARGET_AGP: - offset += dev_priv->gart_info.aper_base; + case NV_MEM_TARGET_VRAM: + flags0 |= 0x00010000; + break; + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; break; + case NV_MEM_TARGET_GART: + base += dev_priv->gart_info.aper_base; default: + flags0 &= ~0x00100000; break; } - ret = nouveau_gpuobj_new(dev, chan, - nouveau_gpuobj_class_instmem_size(dev, class), - 16, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, gpuobj); - if (ret) { - NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); - return ret; - } + /* convert to base + limit */ + size = (base + size) - 1; - if (dev_priv->card_type < NV_50) { - uint32_t frame, adjust, pte_flags = 0; - - if (access != NV_DMA_ACCESS_RO) - pte_flags |= (1<<1); - adjust = offset & 0x00000fff; - frame = offset & ~0x00000fff; - - nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) | - (access << 14) | (target << 16) | - class)); - nv_wo32(*gpuobj, 4, size - 1); - nv_wo32(*gpuobj, 8, frame | pte_flags); - nv_wo32(*gpuobj, 12, frame | pte_flags); - } else { - uint64_t limit = offset + size - 1; - uint32_t flags0, flags5; + nv_wo32(obj, offset + 0x00, flags0); + nv_wo32(obj, offset + 0x04, lower_32_bits(size)); + nv_wo32(obj, offset + 0x08, lower_32_bits(base)); + nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 | + upper_32_bits(base)); + nv_wo32(obj, offset + 0x10, 0x00000000); + nv_wo32(obj, offset + 0x14, 0x00000000); - if (target == NV_DMA_TARGET_VIDMEM) { - flags0 = 0x00190000; - flags5 = 0x00010000; - } else { - flags0 = 0x7fc00000; - flags5 = 0x00080000; - } + pinstmem->flush(obj->dev); +} - nv_wo32(*gpuobj, 0, flags0 | class); - nv_wo32(*gpuobj, 4, lower_32_bits(limit)); - nv_wo32(*gpuobj, 8, lower_32_bits(offset)); - nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) | - (upper_32_bits(offset) & 0xff)); - nv_wo32(*gpuobj, 20, flags5); - } +int +nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size, + int target, int access, u32 type, u32 comp, + struct nouveau_gpuobj **pobj) +{ + struct drm_device *dev = chan->dev; + int ret; - instmem->flush(dev); + ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj); + if (ret) + return ret; - (*gpuobj)->engine = NVOBJ_ENGINE_SW; - (*gpuobj)->class = class; + nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target, + access, type, comp); return 0; } int -nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, - 
uint64_t offset, uint64_t size, int access, - struct nouveau_gpuobj **gpuobj, - uint32_t *o_ret) +nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, + u64 size, int access, int target, + struct nouveau_gpuobj **pobj) { + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *obj; + u32 flags0, flags2; int ret; - if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || - (dev_priv->card_type >= NV_50 && - dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { - ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - offset + dev_priv->vm_gart_base, - size, access, NV_DMA_TARGET_AGP, - gpuobj); - if (o_ret) - *o_ret = 0; - } else - if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { - nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); - if (offset & ~0xffffffffULL) { - NV_ERROR(dev, "obj offset exceeds 32-bits\n"); - return -EINVAL; + if (dev_priv->card_type >= NV_50) { + u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0; + u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0; + + return nv50_gpuobj_dma_new(chan, class, base, size, + target, access, type, comp, pobj); + } + + if (target == NV_MEM_TARGET_GART) { + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { + target = NV_MEM_TARGET_PCI_NOSNOOP; + base += dev_priv->gart_info.aper_base; + } else + if (base != 0) { + base = nouveau_sgdma_get_physical(dev, base); + target = NV_MEM_TARGET_PCI; + } else { + nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj); + return 0; } - if (o_ret) - *o_ret = (uint32_t)offset; - ret = (*gpuobj != NULL) ? 0 : -EINVAL; - } else { - NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); - return -EINVAL; } - return ret; + flags0 = class; + flags0 |= 0x00003000; /* PT present, PT linear */ + flags2 = 0; + + switch (target) { + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; + break; + default: + break; + } + + switch (access) { + case NV_MEM_ACCESS_RO: + flags0 |= 0x00004000; + break; + case NV_MEM_ACCESS_WO: + flags0 |= 0x00008000; + default: + flags2 |= 0x00000002; + break; + } + + flags0 |= (base & 0x00000fff) << 20; + flags2 |= (base & 0xfffff000); + + ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, flags0); + nv_wo32(obj, 0x04, size - 1); + nv_wo32(obj, 0x08, flags2); + nv_wo32(obj, 0x0c, flags2); + + obj->engine = NVOBJ_ENGINE_SW; + obj->class = class; + *pobj = obj; + return 0; } /* Context objects in the instance RAM have the following structure. @@ -495,82 +598,122 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, entry[5]: set to 0? 
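 *
 *   On NV50 and later the driver no longer fills in this template;
 *   nouveau_gpuobj_gr_new() below only writes the class id to word 0
 *   and 0x00010000 to word 5.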
*/ +static int +nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, + struct nouveau_gpuobj **gpuobj_ret) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_gpuobj *gpuobj; + + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; + gpuobj->dev = chan->dev; + gpuobj->engine = NVOBJ_ENGINE_SW; + gpuobj->class = class; + kref_init(&gpuobj->refcount); + gpuobj->cinst = 0x40; + + spin_lock(&dev_priv->ramin_lock); + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); + *gpuobj_ret = gpuobj; + return 0; +} + int -nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, - struct nouveau_gpuobj **gpuobj) +nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class) { + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_class *oc; + struct nouveau_gpuobj *gpuobj; int ret; NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id == class) + goto found; + } + + NV_ERROR(dev, "illegal object class: 0x%x\n", class); + return -EINVAL; + +found: + switch (oc->engine) { + case NVOBJ_ENGINE_SW: + ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj); + if (ret) + return ret; + goto insert; + case NVOBJ_ENGINE_GR: + if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) { + struct nouveau_pgraph_engine *pgraph = + &dev_priv->engine.graph; + + ret = pgraph->create_context(chan); + if (ret) + return ret; + } + break; + case NVOBJ_ENGINE_CRYPT: + if (!chan->crypt_ctx) { + struct nouveau_crypt_engine *pcrypt = + &dev_priv->engine.crypt; + + ret = pcrypt->create_context(chan); + if (ret) + return ret; + } + break; + } + ret = nouveau_gpuobj_new(dev, chan, nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, - gpuobj); + &gpuobj); if (ret) { - NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); + NV_ERROR(dev, "error creating gpuobj: %d\n", ret); return ret; } if (dev_priv->card_type >= NV_50) { - nv_wo32(*gpuobj, 0, class); - nv_wo32(*gpuobj, 20, 0x00010000); + nv_wo32(gpuobj, 0, class); + nv_wo32(gpuobj, 20, 0x00010000); } else { switch (class) { case NV_CLASS_NULL: - nv_wo32(*gpuobj, 0, 0x00001030); - nv_wo32(*gpuobj, 4, 0xFFFFFFFF); + nv_wo32(gpuobj, 0, 0x00001030); + nv_wo32(gpuobj, 4, 0xFFFFFFFF); break; default: if (dev_priv->card_type >= NV_40) { - nv_wo32(*gpuobj, 0, class); + nv_wo32(gpuobj, 0, class); #ifdef __BIG_ENDIAN - nv_wo32(*gpuobj, 8, 0x01000000); + nv_wo32(gpuobj, 8, 0x01000000); #endif } else { #ifdef __BIG_ENDIAN - nv_wo32(*gpuobj, 0, class | 0x00080000); + nv_wo32(gpuobj, 0, class | 0x00080000); #else - nv_wo32(*gpuobj, 0, class); + nv_wo32(gpuobj, 0, class); #endif } } } dev_priv->engine.instmem.flush(dev); - (*gpuobj)->engine = NVOBJ_ENGINE_GR; - (*gpuobj)->class = class; - return 0; -} - -int -nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, - struct nouveau_gpuobj **gpuobj_ret) -{ - struct drm_nouveau_private *dev_priv; - struct nouveau_gpuobj *gpuobj; - - if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) - return -EINVAL; - dev_priv = chan->dev->dev_private; - - gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); - if (!gpuobj) - return -ENOMEM; - gpuobj->dev = chan->dev; - gpuobj->engine = NVOBJ_ENGINE_SW; - gpuobj->class = class; - kref_init(&gpuobj->refcount); - gpuobj->cinst = 0x40; + gpuobj->engine = oc->engine; 
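	/* record which validated class/engine pair this object
	 * represents before it is hashed into RAMHT below */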
+ gpuobj->class = oc->id; - spin_lock(&dev_priv->ramin_lock); - list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - spin_unlock(&dev_priv->ramin_lock); - *gpuobj_ret = gpuobj; - return 0; +insert: + ret = nouveau_ramht_insert(chan, handle, gpuobj); + if (ret) + NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret); + nouveau_gpuobj_ref(NULL, &gpuobj); + return ret; } static int @@ -585,7 +728,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); /* Base amount for object storage (4KiB enough?) */ - size = 0x1000; + size = 0x2000; base = 0; /* PGRAPH context */ @@ -624,9 +767,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; struct nouveau_gpuobj *vram = NULL, *tt = NULL; - int ret, i; + int ret; NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); @@ -637,16 +779,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, return ret; } - /* NV50 VM + /* NV50/NVC0 VM * - Allocate per-channel page-directory - * - Map GART and VRAM into the channel's address space at the - * locations determined during init. + * - Link with shared channel VM */ - if (dev_priv->card_type >= NV_50) { + if (dev_priv->chan_vm) { u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; u64 vm_vinst = chan->ramin->vinst + pgd_offs; u32 vm_pinst = chan->ramin->pinst; - u32 pde; if (vm_pinst != ~0) vm_pinst += pgd_offs; @@ -655,29 +795,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, 0, &chan->vm_pd); if (ret) return ret; - for (i = 0; i < 0x4000; i += 8) { - nv_wo32(chan->vm_pd, i + 0, 0x00000000); - nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); - } - - nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, - &chan->vm_gart_pt); - pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; - nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3); - nv_wo32(chan->vm_pd, pde + 4, 0x00000000); - - pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i], - &chan->vm_vram_pt[i]); - - nv_wo32(chan->vm_pd, pde + 0, - chan->vm_vram_pt[i]->vinst | 0x61); - nv_wo32(chan->vm_pd, pde + 4, 0x00000000); - pde += 8; - } - instmem->flush(dev); + nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); } /* RAMHT */ @@ -700,9 +819,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, /* VRAM ctxdma */ if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->vm_end, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_AGP, &vram); + 0, (1ULL << 40), NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VM, &vram); if (ret) { NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); return ret; @@ -710,8 +828,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, } else { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_VIDMEM, &vram); + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VRAM, &vram); if (ret) { NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); return ret; @@ -728,21 +846,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, /* TT memory ctxdma */ if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->vm_end, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_AGP, &tt); - if (ret) { - NV_ERROR(dev, "Error creating VRAM 
ctxdma: %d\n", ret); - return ret; - } - } else - if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { - ret = nouveau_gpuobj_gart_dma_new(chan, 0, - dev_priv->gart_info.aper_size, - NV_DMA_ACCESS_RW, &tt, NULL); + 0, (1ULL << 40), NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VM, &tt); } else { - NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); - ret = -EINVAL; + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->gart_info.aper_size, + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_GART, &tt); } if (ret) { @@ -763,9 +873,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, void nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; - int i; NV_DEBUG(dev, "ch%d\n", chan->id); @@ -774,10 +882,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) nouveau_ramht_ref(NULL, &chan->ramht, chan); + nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); - nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); if (chan->ramin_heap.free_stack.next) drm_mm_takedown(&chan->ramin_heap); @@ -791,147 +897,91 @@ nouveau_gpuobj_suspend(struct drm_device *dev) struct nouveau_gpuobj *gpuobj; int i; - if (dev_priv->card_type < NV_50) { - dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram); - if (!dev_priv->susres.ramin_copy) - return -ENOMEM; - - for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) - dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i); - return 0; - } - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing) + if (gpuobj->cinst != NVOBJ_CINST_GLOBAL) continue; - gpuobj->im_backing_suspend = vmalloc(gpuobj->size); - if (!gpuobj->im_backing_suspend) { + gpuobj->suspend = vmalloc(gpuobj->size); + if (!gpuobj->suspend) { nouveau_gpuobj_resume(dev); return -ENOMEM; } for (i = 0; i < gpuobj->size; i += 4) - gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); + gpuobj->suspend[i/4] = nv_ro32(gpuobj, i); } return 0; } void -nouveau_gpuobj_suspend_cleanup(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj; - - if (dev_priv->card_type < NV_50) { - vfree(dev_priv->susres.ramin_copy); - dev_priv->susres.ramin_copy = NULL; - return; - } - - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing_suspend) - continue; - - vfree(gpuobj->im_backing_suspend); - gpuobj->im_backing_suspend = NULL; - } -} - -void nouveau_gpuobj_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj; int i; - if (dev_priv->card_type < NV_50) { - for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) - nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]); - nouveau_gpuobj_suspend_cleanup(dev); - return; - } - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing_suspend) + if (!gpuobj->suspend) continue; for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); - dev_priv->engine.instmem.flush(dev); + nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); + + vfree(gpuobj->suspend); + gpuobj->suspend = NULL; } - nouveau_gpuobj_suspend_cleanup(dev); + dev_priv->engine.instmem.flush(dev); } int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_private *dev_priv = 
dev->dev_private; struct drm_nouveau_grobj_alloc *init = data; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_pgraph_object_class *grc; - struct nouveau_gpuobj *gr = NULL; struct nouveau_channel *chan; int ret; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); - if (init->handle == ~0) return -EINVAL; - grc = pgraph->grclass; - while (grc->id) { - if (grc->id == init->class) - break; - grc++; - } + chan = nouveau_channel_get(dev, file_priv, init->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); - if (!grc->id) { - NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); - return -EPERM; + if (nouveau_ramht_find(chan, init->handle)) { + ret = -EEXIST; + goto out; } - if (nouveau_ramht_find(chan, init->handle)) - return -EEXIST; - - if (!grc->software) - ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); - else - ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); + ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); if (ret) { NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); - return ret; - } - - ret = nouveau_ramht_insert(chan, init->handle, gr); - nouveau_gpuobj_ref(NULL, &gr); - if (ret) { - NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", - ret, init->channel, init->handle); - return ret; } - return 0; +out: + nouveau_channel_put(&chan); + return ret; } int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_gpuobj_free *objfree = data; - struct nouveau_gpuobj *gpuobj; struct nouveau_channel *chan; + int ret; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, objfree->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); - gpuobj = nouveau_ramht_find(chan, objfree->handle); - if (!gpuobj) - return -ENOENT; + /* Synchronize with the user channel */ + nouveau_channel_idle(chan); - nouveau_ramht_remove(chan, objfree->handle); - return 0; + ret = nouveau_ramht_remove(chan, objfree->handle); + nouveau_channel_put(&chan); + return ret; } u32 diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 9f7b158f5825..d93814160bcf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -27,6 +27,10 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" +#ifdef CONFIG_ACPI +#include <linux/acpi.h> +#endif +#include <linux/power_supply.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> @@ -446,6 +450,25 @@ nouveau_hwmon_fini(struct drm_device *dev) #endif } +#ifdef CONFIG_ACPI +static int +nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) +{ + struct drm_nouveau_private *dev_priv = + container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); + struct drm_device *dev = dev_priv->dev; + struct acpi_bus_event *entry = (struct acpi_bus_event *)data; + + if (strcmp(entry->device_class, "ac_adapter") == 0) { + bool ac = power_supply_is_system_supplied(); + + NV_DEBUG(dev, "power supply changed: %s\n", ac ? 
"AC" : "DC"); + } + + return NOTIFY_OK; +} +#endif + int nouveau_pm_init(struct drm_device *dev) { @@ -485,6 +508,10 @@ nouveau_pm_init(struct drm_device *dev) nouveau_sysfs_init(dev); nouveau_hwmon_init(dev); +#ifdef CONFIG_ACPI + pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; + register_acpi_notifier(&pm->acpi_nb); +#endif return 0; } @@ -503,6 +530,9 @@ nouveau_pm_fini(struct drm_device *dev) nouveau_perf_fini(dev); nouveau_volt_fini(dev); +#ifdef CONFIG_ACPI + unregister_acpi_notifier(&pm->acpi_nb); +#endif nouveau_hwmon_fini(dev); nouveau_sysfs_fini(dev); } diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 2d8580927ca4..bef3e6910418 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); if (dev_priv->card_type < NV_40) { - ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | + ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); } else if (dev_priv->card_type < NV_50) { - ctx = (gpuobj->cinst >> 4) | + ctx = (gpuobj->pinst >> 4) | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } else { if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { - ctx = (gpuobj->cinst << 10) | 2; + ctx = (gpuobj->cinst << 10) | chan->id; } else { ctx = (gpuobj->cinst >> 4) | ((gpuobj->engine << @@ -214,18 +214,19 @@ out: spin_unlock_irqrestore(&chan->ramht->lock, flags); } -void +int nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) { struct nouveau_ramht_entry *entry; entry = nouveau_ramht_remove_entry(chan, handle); if (!entry) - return; + return -ENOENT; nouveau_ramht_remove_hash(chan, entry->handle); nouveau_gpuobj_ref(NULL, &entry->gpuobj); kfree(entry); + return 0; } struct nouveau_gpuobj * diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h index b79cb5e1a8f1..c82de98fee0e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.h +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h @@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **, extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, struct nouveau_gpuobj *); -extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); +extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle); extern struct nouveau_gpuobj * nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 1b42541ca9e5..04e8fb795269 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -45,6 +45,11 @@ # define NV04_PFB_REF_CMD_REFRESH (1 << 0) #define NV04_PFB_PRE 0x001002d4 # define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) +#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i)) +# define NV20_PFB_ZCOMP_MODE_32 (4 << 24) +# define NV20_PFB_ZCOMP_EN (1 << 31) +# define NV25_PFB_ZCOMP_MODE_16 (1 << 20) +# define NV25_PFB_ZCOMP_MODE_32 (2 << 20) #define NV10_PFB_CLOSE_PAGE2 0x0010033c #define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) #define NV40_PFB_TILE(i) (0x00100600 + (i*16)) @@ -74,17 +79,6 @@ # define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 # define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 -/* DMA object defines */ -#define NV_DMA_ACCESS_RW 0 -#define NV_DMA_ACCESS_RO 1 
-#define NV_DMA_ACCESS_WO 2 -#define NV_DMA_TARGET_VIDMEM 0 -#define NV_DMA_TARGET_PCI 2 -#define NV_DMA_TARGET_AGP 3 -/* The following is not a real value used by the card, it's changed by - * nouveau_object_dma_create */ -#define NV_DMA_TARGET_PCI_NONLINEAR 8 - /* Some object classes we care about in the drm */ #define NV_CLASS_DMA_FROM_MEMORY 0x00000002 #define NV_CLASS_DMA_TO_MEMORY 0x00000003 @@ -332,6 +326,7 @@ #define NV04_PGRAPH_BSWIZZLE5 0x004006A0 #define NV03_PGRAPH_STATUS 0x004006B0 #define NV04_PGRAPH_STATUS 0x00400700 +# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000 #define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 #define NV04_PGRAPH_TRAPPED_DATA 0x00400708 #define NV04_PGRAPH_SURFACE 0x0040070C @@ -378,6 +373,7 @@ #define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) +#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) @@ -714,31 +710,32 @@ #define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 #define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 #define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 -#define NV50_PDISPLAY_INTR_EN 0x0061002c -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2)) -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004 -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008 -#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010 -#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020 -#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040 +#define NV50_PDISPLAY_INTR_EN_0 0x00610028 +#define NV50_PDISPLAY_INTR_EN_1 0x0061002c +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004 +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040 #define NV50_PDISPLAY_UNK30_CTRL 0x00610030 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 #define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 -#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 -#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 -#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200) -#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010 -#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000 -#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010 -#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204) -#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002 -#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000 -#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002 -#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001 -#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208) -#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c) +#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080) +#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084) +#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200) +#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010 +#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000 +#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010 +#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 
0x10 + 0x00610204) +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002 +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000 +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002 +#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001 +#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208) +#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c) #define NV50_PDISPLAY_CURSOR 0x00610270 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) @@ -746,15 +743,11 @@ #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 -#define NV50_PDISPLAY_CTRL_STATE 0x00610300 -#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000 -#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc -#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001 -#define NV50_PDISPLAY_CTRL_VAL 0x00610304 -#define NV50_PDISPLAY_UNK_380 0x00610380 -#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384 -#define NV50_PDISPLAY_UNK_388 0x00610388 -#define NV50_PDISPLAY_UNK_38C 0x0061038c +#define NV50_PDISPLAY_PIO_CTRL 0x00610300 +#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc +#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001 +#define NV50_PDISPLAY_PIO_DATA 0x00610304 #define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) #define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index d4ac97007038..9a250eb53098 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -14,7 +14,7 @@ struct nouveau_sgdma_be { dma_addr_t *pages; unsigned nr_pages; - unsigned pte_start; + u64 offset; bool bound; }; @@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be) } } -static inline unsigned -nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT); - - if (dev_priv->card_type < NV_50) - return pte + 2; - - return pte << 1; -} - static int nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) { @@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) NV_DEBUG(dev, "pg=0x%lx\n", mem->start); - pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT); - nvbe->pte_start = pte; + nvbe->offset = mem->start << PAGE_SHIFT; + pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; for (i = 0; i < nvbe->nr_pages; i++) { dma_addr_t dma_offset = nvbe->pages[i]; uint32_t offset_l = lower_32_bits(dma_offset); - uint32_t offset_h = upper_32_bits(dma_offset); - - for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) { - nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); - pte += 1; - } else { - nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21); - nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff); - pte += 2; - } + for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { + nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); dma_offset += NV_CTXDMA_PAGE_SIZE; } } - dev_priv->engine.instmem.flush(nvbe->dev); - - if (dev_priv->card_type == NV_50) { - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); - } nvbe->bound = true; return 0; @@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be) if (!nvbe->bound) return 0; - pte = nvbe->pte_start; + pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; for (i = 0; i < nvbe->nr_pages; i++) 
{ - dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; - - for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) { - nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3); - pte += 1; - } else { - nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); - nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000); - pte += 2; - } - - dma_offset += NV_CTXDMA_PAGE_SIZE; - } - } - dev_priv->engine.instmem.flush(nvbe->dev); - - if (dev_priv->card_type == NV_50) { - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); + for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) + nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); } nvbe->bound = false; @@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be) } } +static int +nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + nvbe->offset = mem->start << PAGE_SHIFT; + + nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset, + nvbe->nr_pages << PAGE_SHIFT, nvbe->pages); + nvbe->bound = true; + return 0; +} + +static int +nv50_sgdma_unbind(struct ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + if (!nvbe->bound) + return 0; + + nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset, + nvbe->nr_pages << PAGE_SHIFT); + nvbe->bound = false; + return 0; +} + static struct ttm_backend_func nouveau_sgdma_backend = { .populate = nouveau_sgdma_populate, .clear = nouveau_sgdma_clear, @@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = { .destroy = nouveau_sgdma_destroy }; +static struct ttm_backend_func nv50_sgdma_backend = { + .populate = nouveau_sgdma_populate, + .clear = nouveau_sgdma_clear, + .bind = nv50_sgdma_bind, + .unbind = nv50_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + struct ttm_backend * nouveau_sgdma_init_ttm(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_sgdma_be *nvbe; - if (!dev_priv->gart_info.sg_ctxdma) - return NULL; - nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); if (!nvbe) return NULL; nvbe->dev = dev; - nvbe->backend.func = &nouveau_sgdma_backend; - + if (dev_priv->card_type < NV_50) + nvbe->backend.func = &nouveau_sgdma_backend; + else + nvbe->backend.func = &nv50_sgdma_backend; return &nvbe->backend; } @@ -218,7 +209,6 @@ int nouveau_sgdma_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct pci_dev *pdev = dev->pdev; struct nouveau_gpuobj *gpuobj = NULL; uint32_t aper_size, obj_size; int i, ret; @@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev) obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; obj_size += 8; /* ctxdma header */ - } else { - /* 1 entire VM page table */ - aper_size = (512 * 1024 * 1024); - obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; - } - - ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, &gpuobj); - if (ret) { - NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); - return ret; - } - - dev_priv->gart_info.sg_dummy_page = - alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO); - if (!dev_priv->gart_info.sg_dummy_page) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); - dev_priv->gart_info.sg_dummy_bus = - pci_map_page(pdev, 
dev_priv->gart_info.sg_dummy_page, 0, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -EFAULT; - } + ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &gpuobj); + if (ret) { + NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); + return ret; + } - if (dev_priv->card_type < NV_50) { - /* special case, allocated from global instmem heap so - * cinst is invalid, we use it on all channels though so - * cinst needs to be valid, set it the same as pinst - */ - gpuobj->cinst = gpuobj->pinst; - - /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and - * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE - * on those cards? */ nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | (1 << 12) /* PT present */ | (0 << 13) /* PT *not* linear */ | - (NV_DMA_ACCESS_RW << 14) | - (NV_DMA_TARGET_PCI << 16)); + (0 << 14) /* RW */ | + (2 << 16) /* PCI */); nv_wo32(gpuobj, 4, aper_size - 1); - for (i = 2; i < 2 + (aper_size >> 12); i++) { - nv_wo32(gpuobj, i * 4, - dev_priv->gart_info.sg_dummy_bus | 3); - } - } else { - for (i = 0; i < obj_size; i += 8) { - nv_wo32(gpuobj, i + 0, 0x00000000); - nv_wo32(gpuobj, i + 4, 0x00000000); - } + for (i = 2; i < 2 + (aper_size >> 12); i++) + nv_wo32(gpuobj, i * 4, 0x00000000); + + dev_priv->gart_info.sg_ctxdma = gpuobj; + dev_priv->gart_info.aper_base = 0; + dev_priv->gart_info.aper_size = aper_size; + } else + if (dev_priv->chan_vm) { + ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024, + 12, NV_MEM_ACCESS_RW, + &dev_priv->gart_info.vma); + if (ret) + return ret; + + dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset; + dev_priv->gart_info.aper_size = 512 * 1024 * 1024; } - dev_priv->engine.instmem.flush(dev); dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; - dev_priv->gart_info.aper_base = 0; - dev_priv->gart_info.aper_size = aper_size; - dev_priv->gart_info.sg_ctxdma = gpuobj; return 0; } @@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - if (dev_priv->gart_info.sg_dummy_page) { - pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, - NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - unlock_page(dev_priv->gart_info.sg_dummy_page); - __free_page(dev_priv->gart_info.sg_dummy_page); - dev_priv->gart_info.sg_dummy_page = NULL; - dev_priv->gart_info.sg_dummy_bus = 0; - } - nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); + nouveau_vm_put(&dev_priv->gart_info.vma); } -int -nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) +uint32_t +nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; - int pte; + int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2; - pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2; - if (dev_priv->card_type < NV_50) { - *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK; - return 0; - } + BUG_ON(dev_priv->card_type >= NV_50); - NV_ERROR(dev, "Unimplemented on NV50\n"); - return -EINVAL; + return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) | + (offset & NV_CTXDMA_PAGE_MASK); } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 049f755567e5..8eac943e8fd2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -53,10 +53,10 
@@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv04_fb_init; engine->fb.takedown = nv04_fb_takedown; - engine->graph.grclass = nv04_graph_grclass; engine->graph.init = nv04_graph_init; engine->graph.takedown = nv04_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.unload_context = nv04_graph_unload_context; engine->fifo.channels = 16; engine->fifo.init = nv04_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; @@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_get = nv04_pm_clock_get; engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x10: engine->instmem.init = nv04_instmem_init; engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; - engine->fb.set_region_tiling = nv10_fb_set_region_tiling; - engine->graph.grclass = nv10_graph_grclass; + engine->fb.init_tile_region = nv10_fb_init_tile_region; + engine->fb.set_tile_region = nv10_fb_set_tile_region; + engine->fb.free_tile_region = nv10_fb_free_tile_region; engine->graph.init = nv10_graph_init; engine->graph.takedown = nv10_graph_takedown; engine->graph.channel = nv10_graph_channel; @@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.fifo_access = nv04_graph_fifo_access; engine->graph.load_context = nv10_graph_load_context; engine->graph.unload_context = nv10_graph_unload_context; - engine->graph.set_region_tiling = nv10_graph_set_region_tiling; + engine->graph.set_tile_region = nv10_graph_set_tile_region; 
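[editor's note] The nouveau_state.c hunks above and below rework the per-chipset engine tables: the instmem populate/clear/bind/unbind hooks become get/put/map/unmap, the single set_region_tiling callback is split into init/set/free tile-region hooks, and most pre-NV50 families now share the nv04/nv10 implementations. A minimal sketch of this function-table dispatch pattern, assuming hypothetical demo_* names throughout (nothing here is the driver's real API):

#include <stdio.h>

struct demo_instmem_ops {
	int  (*get)(int size);   /* was "populate" before this diff */
	void (*put)(int handle); /* was "clear" */
};

static int demo_nv04_get(int size)
{
	printf("nv04 instmem get: %d bytes\n", size);
	return 1; /* pretend handle */
}

static void demo_nv04_put(int handle)
{
	printf("nv04 instmem put: %d\n", handle);
}

static void
demo_init_engine_ptrs(struct demo_instmem_ops *ops, int chipset)
{
	switch (chipset & 0xf0) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
	case 0x40:
		/* all pre-NV50 families share one implementation */
		ops->get = demo_nv04_get;
		ops->put = demo_nv04_put;
		break;
	default:
		/* NV50/NVC0 would install their own hooks here */
		break;
	}
}

int main(void)
{
	struct demo_instmem_ops ops = {0};

	demo_init_engine_ptrs(&ops, 0x17); /* an NV10-family chipset */
	ops.put(ops.get(4096));
	return 0;
}

The point of the pattern is that chipset dispatch happens exactly once at init; every later caller goes through the function pointers and never re-tests the chipset.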
engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; - engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; engine->display.early_init = nv04_display_early_init; @@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_get = nv04_pm_clock_get; engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x20: engine->instmem.init = nv04_instmem_init; engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; - engine->fb.set_region_tiling = nv10_fb_set_region_tiling; - engine->graph.grclass = nv20_graph_grclass; + engine->fb.init_tile_region = nv10_fb_init_tile_region; + engine->fb.set_tile_region = nv10_fb_set_tile_region; + engine->fb.free_tile_region = nv10_fb_free_tile_region; engine->graph.init = nv20_graph_init; engine->graph.takedown = nv20_graph_takedown; engine->graph.channel = nv10_graph_channel; @@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.fifo_access = nv04_graph_fifo_access; engine->graph.load_context = nv20_graph_load_context; engine->graph.unload_context = nv20_graph_unload_context; - engine->graph.set_region_tiling = nv20_graph_set_region_tiling; + engine->graph.set_tile_region = nv20_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; - engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; engine->display.early_init = nv04_display_early_init; @@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct 
drm_device *dev) engine->pm.clock_get = nv04_pm_clock_get; engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x30: engine->instmem.init = nv04_instmem_init; engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv30_fb_init; engine->fb.takedown = nv30_fb_takedown; - engine->fb.set_region_tiling = nv10_fb_set_region_tiling; - engine->graph.grclass = nv30_graph_grclass; + engine->fb.init_tile_region = nv30_fb_init_tile_region; + engine->fb.set_tile_region = nv10_fb_set_tile_region; + engine->fb.free_tile_region = nv30_fb_free_tile_region; engine->graph.init = nv30_graph_init; engine->graph.takedown = nv20_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv20_graph_destroy_context; engine->graph.load_context = nv20_graph_load_context; engine->graph.unload_context = nv20_graph_unload_context; - engine->graph.set_region_tiling = nv20_graph_set_region_tiling; + engine->graph.set_tile_region = nv20_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; - engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; engine->display.early_init = nv04_display_early_init; @@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_set = nv04_pm_clock_set; engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x40: case 0x60: @@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - 
engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv40_mc_init; engine->mc.takedown = nv40_mc_takedown; @@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv40_fb_init; engine->fb.takedown = nv40_fb_takedown; - engine->fb.set_region_tiling = nv40_fb_set_region_tiling; - engine->graph.grclass = nv40_graph_grclass; + engine->fb.init_tile_region = nv30_fb_init_tile_region; + engine->fb.set_tile_region = nv40_fb_set_tile_region; + engine->fb.free_tile_region = nv30_fb_free_tile_region; engine->graph.init = nv40_graph_init; engine->graph.takedown = nv40_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv40_graph_destroy_context; engine->graph.load_context = nv40_graph_load_context; engine->graph.unload_context = nv40_graph_unload_context; - engine->graph.set_region_tiling = nv40_graph_set_region_tiling; + engine->graph.set_tile_region = nv40_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv40_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv40_fifo_create_context; - engine->fifo.destroy_context = nv40_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv40_fifo_load_context; engine->fifo.unload_context = nv40_fifo_unload_context; engine->display.early_init = nv04_display_early_init; @@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; engine->pm.temp_get = nv40_temp_get; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. 
*/ @@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv50_instmem_takedown; engine->instmem.suspend = nv50_instmem_suspend; engine->instmem.resume = nv50_instmem_resume; - engine->instmem.populate = nv50_instmem_populate; - engine->instmem.clear = nv50_instmem_clear; - engine->instmem.bind = nv50_instmem_bind; - engine->instmem.unbind = nv50_instmem_unbind; + engine->instmem.get = nv50_instmem_get; + engine->instmem.put = nv50_instmem_put; + engine->instmem.map = nv50_instmem_map; + engine->instmem.unmap = nv50_instmem_unmap; if (dev_priv->chipset == 0x50) engine->instmem.flush = nv50_instmem_flush; else @@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv50_fb_init; engine->fb.takedown = nv50_fb_takedown; - engine->graph.grclass = nv50_graph_grclass; engine->graph.init = nv50_graph_init; engine->graph.takedown = nv50_graph_takedown; engine->graph.fifo_access = nv50_graph_fifo_access; @@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->display.init = nv50_display_init; engine->display.destroy = nv50_display_destroy; engine->gpio.init = nv50_gpio_init; - engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.takedown = nv50_gpio_fini; engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; + engine->gpio.irq_register = nv50_gpio_irq_register; + engine->gpio.irq_unregister = nv50_gpio_irq_unregister; engine->gpio.irq_enable = nv50_gpio_irq_enable; switch (dev_priv->chipset) { - case 0xa3: - case 0xa5: - case 0xa8: - case 0xaf: - engine->pm.clock_get = nva3_pm_clock_get; - engine->pm.clock_pre = nva3_pm_clock_pre; - engine->pm.clock_set = nva3_pm_clock_set; - break; - default: + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + case 0xa0: + case 0xaa: + case 0xac: + case 0x50: engine->pm.clock_get = nv50_pm_clock_get; engine->pm.clock_pre = nv50_pm_clock_pre; engine->pm.clock_set = nv50_pm_clock_set; break; + default: + engine->pm.clock_get = nva3_pm_clock_get; + engine->pm.clock_pre = nva3_pm_clock_pre; + engine->pm.clock_set = nva3_pm_clock_set; + break; } engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; @@ -406,16 +436,38 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.temp_get = nv84_temp_get; else engine->pm.temp_get = nv40_temp_get; + switch (dev_priv->chipset) { + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0xa0: + engine->crypt.init = nv84_crypt_init; + engine->crypt.takedown = nv84_crypt_fini; + engine->crypt.create_context = nv84_crypt_create_context; + engine->crypt.destroy_context = nv84_crypt_destroy_context; + engine->crypt.tlb_flush = nv84_crypt_tlb_flush; + break; + default: + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + break; + } + engine->vram.init = nv50_vram_init; + engine->vram.get = nv50_vram_new; + engine->vram.put = nv50_vram_del; + engine->vram.flags_valid = nv50_vram_flags_valid; break; case 0xC0: engine->instmem.init = nvc0_instmem_init; engine->instmem.takedown = nvc0_instmem_takedown; engine->instmem.suspend = nvc0_instmem_suspend; engine->instmem.resume = nvc0_instmem_resume; - engine->instmem.populate = nvc0_instmem_populate; - engine->instmem.clear = nvc0_instmem_clear; - engine->instmem.bind = nvc0_instmem_bind; - engine->instmem.unbind = nvc0_instmem_unbind; + 
engine->instmem.get = nvc0_instmem_get; + engine->instmem.put = nvc0_instmem_put; + engine->instmem.map = nvc0_instmem_map; + engine->instmem.unmap = nvc0_instmem_unmap; engine->instmem.flush = nvc0_instmem_flush; engine->mc.init = nv50_mc_init; engine->mc.takedown = nv50_mc_takedown; @@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nvc0_fb_init; engine->fb.takedown = nvc0_fb_takedown; - engine->graph.grclass = NULL; //nvc0_graph_grclass; engine->graph.init = nvc0_graph_init; engine->graph.takedown = nvc0_graph_takedown; engine->graph.fifo_access = nvc0_graph_fifo_access; @@ -453,7 +504,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.takedown = nouveau_stub_takedown; engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; + engine->gpio.irq_register = nv50_gpio_irq_register; + engine->gpio.irq_unregister = nv50_gpio_irq_unregister; engine->gpio.irq_enable = nv50_gpio_irq_enable; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; default: NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); @@ -495,7 +552,7 @@ nouveau_card_init_channel(struct drm_device *dev) ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vram_size, - NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, + NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, &gpuobj); if (ret) goto out_err; @@ -505,9 +562,10 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) goto out_err; - ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, - dev_priv->gart_info.aper_size, - NV_DMA_ACCESS_RW, &gpuobj, NULL); + ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->gart_info.aper_size, + NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART, + &gpuobj); if (ret) goto out_err; @@ -516,11 +574,11 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) goto out_err; + mutex_unlock(&dev_priv->channel->mutex); return 0; out_err: - nouveau_channel_free(dev_priv->channel); - dev_priv->channel = NULL; + nouveau_channel_put(&dev_priv->channel); return ret; } @@ -567,6 +625,8 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out; engine = &dev_priv->engine; + spin_lock_init(&dev_priv->channels.lock); + spin_lock_init(&dev_priv->tile.lock); spin_lock_init(&dev_priv->context_switch_lock); /* Make the CRTCs and I2C buses accessible */ @@ -625,26 +685,28 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_fb; + /* PCRYPT */ + ret = engine->crypt.init(dev); + if (ret) + goto out_graph; + /* PFIFO */ ret = engine->fifo.init(dev); if (ret) - goto out_graph; + goto out_crypt; } ret = engine->display.create(dev); if (ret) goto out_fifo; - /* this call irq_preinstall, register irq handler and - * call irq_postinstall - */ - ret = drm_irq_install(dev); + ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1); if (ret) - goto out_display; + goto out_vblank; - ret = drm_vblank_init(dev, 0); + ret = nouveau_irq_init(dev); if (ret) - goto out_irq; + goto out_vblank; /* what about PVIDEO/PCRTC/PRAMDAC etc? 
*/ @@ -669,12 +731,16 @@ nouveau_card_init(struct drm_device *dev) out_fence: nouveau_fence_fini(dev); out_irq: - drm_irq_uninstall(dev); -out_display: + nouveau_irq_fini(dev); +out_vblank: + drm_vblank_cleanup(dev); engine->display.destroy(dev); out_fifo: if (!nouveau_noaccel) engine->fifo.takedown(dev); +out_crypt: + if (!nouveau_noaccel) + engine->crypt.takedown(dev); out_graph: if (!nouveau_noaccel) engine->graph.takedown(dev); @@ -713,12 +779,12 @@ static void nouveau_card_takedown(struct drm_device *dev) if (!engine->graph.accel_blocked) { nouveau_fence_fini(dev); - nouveau_channel_free(dev_priv->channel); - dev_priv->channel = NULL; + nouveau_channel_put_unlocked(&dev_priv->channel); } if (!nouveau_noaccel) { engine->fifo.takedown(dev); + engine->crypt.takedown(dev); engine->graph.takedown(dev); } engine->fb.takedown(dev); @@ -737,7 +803,8 @@ static void nouveau_card_takedown(struct drm_device *dev) nouveau_gpuobj_takedown(dev); nouveau_mem_vram_fini(dev); - drm_irq_uninstall(dev); + nouveau_irq_fini(dev); + drm_vblank_cleanup(dev); nouveau_pm_fini(dev); nouveau_bios_takedown(dev); @@ -1024,21 +1091,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, else getparam->value = NV_PCI; break; - case NOUVEAU_GETPARAM_FB_PHYSICAL: - getparam->value = dev_priv->fb_phys; - break; - case NOUVEAU_GETPARAM_AGP_PHYSICAL: - getparam->value = dev_priv->gart_info.aper_base; - break; - case NOUVEAU_GETPARAM_PCI_PHYSICAL: - if (dev->sg) { - getparam->value = (unsigned long)dev->sg->virtual; - } else { - NV_ERROR(dev, "Requested PCIGART address, " - "while no PCIGART was created\n"); - return -EINVAL; - } - break; case NOUVEAU_GETPARAM_FB_SIZE: getparam->value = dev_priv->fb_available_size; break; @@ -1046,7 +1098,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, getparam->value = dev_priv->gart_info.aper_size; break; case NOUVEAU_GETPARAM_VM_VRAM_BASE: - getparam->value = dev_priv->vm_vram_base; + getparam->value = 0; /* deprecated */ break; case NOUVEAU_GETPARAM_PTIMER_TIME: getparam->value = dev_priv->engine.timer.read(dev); @@ -1054,6 +1106,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, case NOUVEAU_GETPARAM_HAS_BO_USAGE: getparam->value = 1; break; + case NOUVEAU_GETPARAM_HAS_PAGEFLIP: + getparam->value = (dev_priv->card_type < NV_50); + break; case NOUVEAU_GETPARAM_GRAPH_UNITS: /* NV40 and NV50 versions are quite different, but register * address is the same. 
User is supposed to know the card @@ -1087,8 +1142,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, } /* Wait until (value(reg) & mask) == val, up until timeout has hit */ -bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, - uint32_t reg, uint32_t mask, uint32_t val) +bool +nouveau_wait_eq(struct drm_device *dev, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; @@ -1102,10 +1158,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, return false; } +/* Wait until (value(reg) & mask) != val, up until timeout has hit */ +bool +nouveau_wait_ne(struct drm_device *dev, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + uint64_t start = ptimer->read(dev); + + do { + if ((nv_rd32(dev, reg) & mask) != val) + return true; + } while (ptimer->read(dev) - start < timeout); + + return false; +} + /* Waits for PGRAPH to go completely idle */ bool nouveau_wait_for_idle(struct drm_device *dev) { - if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t mask = ~0; + + if (dev_priv->card_type == NV_40) + mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL; + + if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) { NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", nv_rd32(dev, NV04_PGRAPH_STATUS)); return false; diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c new file mode 100644 index 000000000000..fbe0fb13bc1e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2010 Nouveau Project + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
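[editor's note] The helpers just above rename nouveau_wait_until() to nouveau_wait_eq() and add the inverted-polarity nouveau_wait_ne(); both poll a masked register value against the PTIMER engine until a nanosecond deadline passes. A user-space analogue of the same loop, assuming a monotonic clock in place of PTIMER and a simulated register (every demo_* name is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t demo_reg_reads;

static uint32_t
demo_read_reg(uint32_t reg)
{
	(void)reg;
	/* simulate a busy bit that clears after a few polls */
	return (++demo_reg_reads < 5) ? 0x00000001 : 0x00000000;
}

static uint64_t
demo_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Wait until (reg & mask) == val; returns false on timeout, like the
 * driver helper. nouveau_wait_ne() is the same loop with "!=". */
static bool
demo_wait_eq(uint64_t timeout_ns, uint32_t reg, uint32_t mask, uint32_t val)
{
	uint64_t start = demo_now_ns();

	do {
		if ((demo_read_reg(reg) & mask) == val)
			return true;
	} while (demo_now_ns() - start < timeout_ns);

	return false;
}

int main(void)
{
	/* 10 ms timeout, wait for bit 0 to clear — cf. the nv04_dac.c
	 * callers converted earlier in this diff */
	printf("%s\n", demo_wait_eq(10000000, 0x601810, 0x1, 0x0)
	       ? "ok" : "timeout");
	return 0;
}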
+ * + */ + +#include <linux/ratelimit.h> + +#include "nouveau_util.h" + +static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); + +void +nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value) +{ + while (bf->name) { + if (value & bf->mask) { + printk(" %s", bf->name); + value &= ~bf->mask; + } + + bf++; + } + + if (value) + printk(" (unknown bits 0x%08x)", value); +} + +void +nouveau_enum_print(const struct nouveau_enum *en, u32 value) +{ + while (en->name) { + if (value == en->value) { + printk("%s", en->name); + return; + } + + en++; + } + + printk("(unknown enum 0x%08x)", value); +} + +int +nouveau_ratelimit(void) +{ + return __ratelimit(&nouveau_ratelimit_state); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h new file mode 100644 index 000000000000..d9ceaea26f4b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2010 Nouveau Project + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_UTIL_H__ +#define __NOUVEAU_UTIL_H__ + +struct nouveau_bitfield { + u32 mask; + const char *name; +}; + +struct nouveau_enum { + u32 value; + const char *name; +}; + +void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value); +void nouveau_enum_print(const struct nouveau_enum *, u32 value); +int nouveau_ratelimit(void); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c new file mode 100644 index 000000000000..07ab1749cf7d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c @@ -0,0 +1,421 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
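[editor's note] nouveau_util.c above introduces table-driven decoding of hardware status words, plus a shared ratelimit gate for error spam. A usage sketch of the bitfield side, with a made-up status table — only the 0x00004000 SYNC_STALL bit value is taken from the register-header hunk earlier in this diff; the demo_* names are illustrative:

#include <stdio.h>

struct demo_bitfield {
	unsigned mask;
	const char *name;
};

/* tables like this would sit next to the IRQ handlers that use them */
static const struct demo_bitfield demo_status[] = {
	{ 0x00000001, "IDLE" },
	{ 0x00000010, "DMA_ACTIVE" },
	{ 0x00004000, "SYNC_STALL" }, /* cf. NV40_PGRAPH_STATUS_SYNC_STALL */
	{ 0, NULL }
};

static void
demo_bitfield_print(const struct demo_bitfield *bf, unsigned value)
{
	for (; bf->name; bf++) {
		if (value & bf->mask) {
			printf(" %s", bf->name);
			value &= ~bf->mask; /* consume decoded bits */
		}
	}
	if (value)
		printf(" (unknown bits 0x%08x)", value);
}

int main(void)
{
	demo_bitfield_print(demo_status, 0x00004011);
	printf("\n"); /* prints: IDLE DMA_ACTIVE SYNC_STALL */
	return 0;
}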
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_mm.h" +#include "nouveau_vm.h" + +void +nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_mm_node *r; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 pde = (offset >> vm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vm->pgt_bits - bits); + u32 end, len; + + list_for_each_entry(r, &vram->regions, rl_entry) { + u64 phys = (u64)r->offset << 12; + u32 num = r->length >> bits; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vm->map(vma, pgt, vram, pte, len, phys); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + } + + vm->flush(vm); +} + +void +nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram) +{ + nouveau_vm_map_at(vma, 0, vram); +} + +void +nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, + dma_addr_t *list) +{ + struct nouveau_vm *vm = vma->vm; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> vm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vm->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vm->map_sg(vma, pgt, pte, list, len); + + num -= len; + pte += len; + list += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + vm->flush(vm); +} + +void +nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) +{ + struct nouveau_vm *vm = vma->vm; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> vm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vm->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vm->unmap(pgt, pte, len); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + vm->flush(vm); +} + +void +nouveau_vm_unmap(struct nouveau_vma *vma) +{ + nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); +} + +static void +nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde) +{ + struct nouveau_vm_pgd *vpgd; + struct nouveau_vm_pgt *vpgt; + struct nouveau_gpuobj *pgt; + u32 pde; + + for (pde = fpde; pde <= lpde; pde++) { + vpgt = &vm->pgt[pde - vm->fpde]; + if (--vpgt->refcount) + continue; + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + vm->unmap_pgt(vpgd->obj, pde); + } + + pgt = vpgt->obj; + vpgt->obj = 
NULL; + + mutex_unlock(&vm->mm->mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&vm->mm->mutex); + } +} + +static int +nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) +{ + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + struct nouveau_vm_pgd *vpgd; + struct nouveau_gpuobj *pgt; + u32 pgt_size; + int ret; + + pgt_size = (1 << (vm->pgt_bits + 12)) >> type; + pgt_size *= 8; + + mutex_unlock(&vm->mm->mutex); + ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &pgt); + mutex_lock(&vm->mm->mutex); + if (unlikely(ret)) + return ret; + + /* someone beat us to filling the PDE while we didn't have the lock */ + if (unlikely(vpgt->refcount++)) { + mutex_unlock(&vm->mm->mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&vm->mm->mutex); + return 0; + } + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + vm->map_pgt(vpgd->obj, type, pde, pgt); + } + + vpgt->page_shift = type; + vpgt->obj = pgt; + return 0; +} + +int +nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, + u32 access, struct nouveau_vma *vma) +{ + u32 align = (1 << page_shift) >> 12; + u32 msize = size >> 12; + u32 fpde, lpde, pde; + int ret; + + mutex_lock(&vm->mm->mutex); + ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node); + if (unlikely(ret != 0)) { + mutex_unlock(&vm->mm->mutex); + return ret; + } + + fpde = (vma->node->offset >> vm->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; + for (pde = fpde; pde <= lpde; pde++) { + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + + if (likely(vpgt->refcount)) { + vpgt->refcount++; + continue; + } + + ret = nouveau_vm_map_pgt(vm, pde, vma->node->type); + if (ret) { + if (pde != fpde) + nouveau_vm_unmap_pgt(vm, fpde, pde - 1); + nouveau_mm_put(vm->mm, vma->node); + mutex_unlock(&vm->mm->mutex); + vma->node = NULL; + return ret; + } + } + mutex_unlock(&vm->mm->mutex); + + vma->vm = vm; + vma->offset = (u64)vma->node->offset << 12; + vma->access = access; + return 0; +} + +void +nouveau_vm_put(struct nouveau_vma *vma) +{ + struct nouveau_vm *vm = vma->vm; + u32 fpde, lpde; + + if (unlikely(vma->node == NULL)) + return; + fpde = (vma->node->offset >> vm->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; + + mutex_lock(&vm->mm->mutex); + nouveau_mm_put(vm->mm, vma->node); + vma->node = NULL; + nouveau_vm_unmap_pgt(vm, fpde, lpde); + mutex_unlock(&vm->mm->mutex); +} + +int +nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, + u8 pgt_bits, u8 spg_shift, u8 lpg_shift, + struct nouveau_vm **pvm) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vm *vm; + u64 mm_length = (offset + length) - mm_offset; + u32 block; + int ret; + + vm = kzalloc(sizeof(*vm), GFP_KERNEL); + if (!vm) + return -ENOMEM; + + if (dev_priv->card_type == NV_50) { + vm->map_pgt = nv50_vm_map_pgt; + vm->unmap_pgt = nv50_vm_unmap_pgt; + vm->map = nv50_vm_map; + vm->map_sg = nv50_vm_map_sg; + vm->unmap = nv50_vm_unmap; + vm->flush = nv50_vm_flush; + } else { + kfree(vm); + return -ENOSYS; + } + + vm->fpde = offset >> pgt_bits; + vm->lpde = (offset + length - 1) >> pgt_bits; + vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL); + if (!vm->pgt) { + kfree(vm); + return -ENOMEM; + } + + INIT_LIST_HEAD(&vm->pgd_list); + vm->dev = dev; + vm->refcount = 1; + vm->pgt_bits = pgt_bits - 12; + vm->spg_shift = spg_shift; + vm->lpg_shift = lpg_shift; + + block = (1 << pgt_bits); + if (length < block) 
+ block = length; + + ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, + block >> 12); + if (ret) { + kfree(vm); + return ret; + } + + *pvm = vm; + return 0; +} + +static int +nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) +{ + struct nouveau_vm_pgd *vpgd; + int i; + + if (!pgd) + return 0; + + vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL); + if (!vpgd) + return -ENOMEM; + + nouveau_gpuobj_ref(pgd, &vpgd->obj); + + mutex_lock(&vm->mm->mutex); + for (i = vm->fpde; i <= vm->lpde; i++) { + struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde]; + + if (!vpgt->obj) { + vm->unmap_pgt(pgd, i); + continue; + } + + vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj); + } + list_add(&vpgd->head, &vm->pgd_list); + mutex_unlock(&vm->mm->mutex); + return 0; +} + +static void +nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) +{ + struct nouveau_vm_pgd *vpgd, *tmp; + + if (!pgd) + return; + + mutex_lock(&vm->mm->mutex); + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + if (vpgd->obj != pgd) + continue; + + list_del(&vpgd->head); + nouveau_gpuobj_ref(NULL, &vpgd->obj); + kfree(vpgd); + } + mutex_unlock(&vm->mm->mutex); +} + +static void +nouveau_vm_del(struct nouveau_vm *vm) +{ + struct nouveau_vm_pgd *vpgd, *tmp; + + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + nouveau_vm_unlink(vm, vpgd->obj); + } + WARN_ON(nouveau_mm_fini(&vm->mm) != 0); + + kfree(vm->pgt); + kfree(vm); +} + +int +nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, + struct nouveau_gpuobj *pgd) +{ + struct nouveau_vm *vm; + int ret; + + vm = ref; + if (vm) { + ret = nouveau_vm_link(vm, pgd); + if (ret) + return ret; + + vm->refcount++; + } + + vm = *ptr; + *ptr = ref; + + if (vm) { + nouveau_vm_unlink(vm, pgd); + + if (--vm->refcount == 0) + nouveau_vm_del(vm); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h new file mode 100644 index 000000000000..b6755cfa7b71 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h @@ -0,0 +1,107 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
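[editor's note] All of the nouveau_vm.c walkers above (map_at, map_sg, unmap_at) reduce an address to 4 KiB units, then split it into a page-directory index and a page-table slot using vm->pgt_bits and the mapping's page type. A worked example of that arithmetic; the 29-bit per-PDE span is an illustrative assumption, not a value taken from this diff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pgt_bits = 29 - 12;     /* one PDE covers 2^29 bytes; offsets
	                                  * are kept in 4 KiB units, matching
	                                  * vm->pgt_bits = pgt_bits - 12 */
	uint32_t page_shift = 12;        /* small (4 KiB) pages */
	uint32_t bits = page_shift - 12; /* 0 here; >0 for large pages */
	uint64_t addr = 0x30001000ull;   /* sample GPU virtual address */

	uint32_t offset = addr >> 12;                             /* 4 KiB units */
	uint32_t pde = offset >> pgt_bits;                        /* which table */
	uint32_t pte = (offset & ((1u << pgt_bits) - 1)) >> bits; /* slot in it */

	printf("addr 0x%llx -> pde %u, pte %u\n",
	       (unsigned long long)addr, pde, pte);
	return 0;
}

This is also why the walkers carry the "if (end >= max) { pde++; pte = 0; }" step: a run of pages can cross from the end of one page table into the start of the next.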
+ * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_VM_H__ +#define __NOUVEAU_VM_H__ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_mm.h" + +struct nouveau_vm_pgt { + struct nouveau_gpuobj *obj; + u32 page_shift; + u32 refcount; +}; + +struct nouveau_vm_pgd { + struct list_head head; + struct nouveau_gpuobj *obj; +}; + +struct nouveau_vma { + struct nouveau_vm *vm; + struct nouveau_mm_node *node; + u64 offset; + u32 access; +}; + +struct nouveau_vm { + struct drm_device *dev; + struct nouveau_mm *mm; + int refcount; + + struct list_head pgd_list; + atomic_t pgraph_refs; + atomic_t pcrypt_refs; + + struct nouveau_vm_pgt *pgt; + u32 fpde; + u32 lpde; + + u32 pgt_bits; + u8 spg_shift; + u8 lpg_shift; + + void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde, + struct nouveau_gpuobj *pgt); + void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde); + void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, + struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); + void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, + u32 pte, dma_addr_t *, u32 cnt); + void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); + void (*flush)(struct nouveau_vm *); +}; + +/* nouveau_vm.c */ +int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset, + u8 pgt_bits, u8 spg_shift, u8 lpg_shift, + struct nouveau_vm **); +int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, + struct nouveau_gpuobj *pgd); +int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, + u32 access, struct nouveau_vma *); +void nouveau_vm_put(struct nouveau_vma *); +void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *); +void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *); +void nouveau_vm_unmap(struct nouveau_vma *); +void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); +void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, + dma_addr_t *); + +/* nv50_vm.c */ +void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, + struct nouveau_gpuobj *pgt); +void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde); +void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, + struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); +void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, + u32 pte, dma_addr_t *, u32 cnt); +void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); +void nv50_vm_flush(struct nouveau_vm *); +void nv50_vm_flush_engine(struct drm_device *, int engine); + +#endif diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 40e180741629..297505eb98d5 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) if (dev_priv->card_type >= NV_30) regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); - regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC; + if (dev_priv->card_type >= NV_10) + regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; + else + regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; /* Some misc regs */ if (dev_priv->card_type == NV_40) { @@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc) if (nv_two_heads(dev)) NVSetOwner(dev, nv_crtc->index); + drm_vblank_pre_modeset(dev, nv_crtc->index); funcs->dpms(crtc, DRM_MODE_DPMS_OFF); NVBlankScreen(dev, nv_crtc->index, true); @@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) #endif 
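[editor's note] The nv04_crtc.c changes above bracket the modeset with drm_vblank_pre_modeset()/drm_vblank_post_modeset() so the DRM core can keep the software vblank counter monotonic while the CRTC is blanked. A condensed kernel-style sketch of that bracketing with the hook bodies elided — the demo_* names are placeholders, while the two drm_vblank_* calls are the real core functions used in the hunk:

#include "drmP.h"

static void
demo_crtc_prepare(struct drm_crtc *crtc)
{
	/* freeze the vblank counter before the CRTC goes dark, so user
	 * space never sees the count jump backwards */
	drm_vblank_pre_modeset(crtc->dev, 0 /* head index */);

	/* ... dpms off, blank the screen, as in nv_crtc_prepare() ... */
}

static void
demo_crtc_commit(struct drm_crtc *crtc)
{
	/* ... program the new mode, dpms on ... */

	/* unfreeze; the core resynchronizes the counter with hardware */
	drm_vblank_post_modeset(crtc->dev, 0 /* head index */);
}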
funcs->dpms(crtc, DRM_MODE_DPMS_ON); + drm_vblank_post_modeset(dev, nv_crtc->index); } static void nv_crtc_destroy(struct drm_crtc *crtc) @@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = { .cursor_move = nv04_crtc_cursor_move, .gamma_set = nv_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, + .page_flip = nouveau_crtc_page_flip, .destroy = nv_crtc_destroy, }; diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index ba6423f2ffcc..e000455e06d0 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2]) * use a 10ms timeout (guards against crtc being inactive, in * which case blank state would never change) */ - if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, - 0x00000001, 0x00000000)) + if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) return -EBUSY; - if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, - 0x00000001, 0x00000001)) + if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000001)) return -EBUSY; - if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, - 0x00000001, 0x00000000)) + if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) return -EBUSY; udelay(100); diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index 9e28cf772e3c..1715e1464b7d 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c @@ -32,6 +32,9 @@ #include "nouveau_encoder.h" #include "nouveau_connector.h" +static void nv04_vblank_crtc0_isr(struct drm_device *); +static void nv04_vblank_crtc1_isr(struct drm_device *); + static void nv04_display_store_initial_head_owner(struct drm_device *dev) { @@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev) func->save(encoder); } + nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr); + nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr); return 0; } @@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev) NV_DEBUG_KMS(dev, "\n"); + nouveau_irq_unregister(dev, 24); + nouveau_irq_unregister(dev, 25); + /* Turn every CRTC off. 
*/ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct drm_mode_set modeset = { @@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev) return 0; } +static void +nv04_vblank_crtc0_isr(struct drm_device *dev) +{ + nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + drm_handle_vblank(dev, 0); +} + +static void +nv04_vblank_crtc1_isr(struct drm_device *dev) +{ + nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); + drm_handle_vblank(dev, 1); +} diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 33e4c9388bc1..7a1189371096 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -28,52 +28,39 @@ #include "nouveau_ramht.h" #include "nouveau_fbcon.h" -void +int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_copyarea(info, region); - return; - } + ret = RING_SPACE(chan, 4); + if (ret) + return ret; BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); OUT_RING(chan, (region->sy << 16) | region->sx); OUT_RING(chan, (region->dy << 16) | region->dx); OUT_RING(chan, (region->height << 16) | region->width); FIRE_RING(chan); + return 0; } -void +int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_fillrect(info, rect); - return; - } + ret = RING_SPACE(chan, 7); + if (ret) + return ret; BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); @@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) OUT_RING(chan, (rect->dx << 16) | rect->dy); OUT_RING(chan, (rect->width << 16) | rect->height); FIRE_RING(chan); + return 0; } -void +int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *nfbdev = info->par; @@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t dsize; uint32_t width; uint32_t *data = (uint32_t *)image->data; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (image->depth != 1) { - cfb_imageblit(info, image); - return; - } - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { - nouveau_fbcon_gpu_lockup(info); - } + if (image->depth != 1) + return -ENODEV; - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, 8); + if (ret) + return ret; width = ALIGN(image->width, 8); dsize = ALIGN(width * image->height, 32) >> 5; @@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) while (dsize) { int iter_len = dsize > 128 ? 
128 : dsize; - if (RING_SPACE(chan, iter_len + 1)) { - nouveau_fbcon_gpu_lockup(info); - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, iter_len + 1); + if (ret) + return ret; BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); OUT_RINGp(chan, data, iter_len); @@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) } FIRE_RING(chan); -} - -static int -nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *obj = NULL; - int ret; - - ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj); - if (ret) - return ret; - - ret = nouveau_ramht_insert(dev_priv->channel, handle, obj); - nouveau_gpuobj_ref(NULL, &obj); - return ret; + return 0; } int @@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info) return -EINVAL; } - ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? - 0x0062 : 0x0042, NvCtxSurf2D); + ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D, + dev_priv->card_type >= NV_10 ? + 0x0062 : 0x0042); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect); + ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop); + ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt); + ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect); + ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? - 0x009f : 0x005f, NvImageBlit); + ret = nouveau_gpuobj_gr_new(chan, NvImageBlit, + dev_priv->chipset >= 0x11 ? 
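/* 0x009f is the newer imageblit class used from chipset 0x11 on, 0x005f the original NV4 one */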
+ 0x009f : 0x005f); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 708293b7ddcd..f89d104698df 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -28,6 +28,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_ramht.h" +#include "nouveau_util.h" #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) #define NV04_RAMFC__SIZE 32 @@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV03_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); /* Setup initial state */ @@ -151,10 +157,31 @@ void nv04_fifo_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + unsigned long flags; - nv_wr32(dev, NV04_PFIFO_MODE, - nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pfifo->reassign(dev, false); + /* Unload the context if it's the currently active one */ + if (pfifo->channel_id(dev) == chan->id) { + pfifo->disable(dev); + pfifo->unload_context(dev); + pfifo->enable(dev); + } + + /* Keep it from being rescheduled */ + nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); + + pfifo->reassign(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + /* Free the channel resources */ + if (chan->user) { + iounmap(chan->user); + chan->user = NULL; + } nouveau_gpuobj_ref(NULL, &chan->ramfc); } @@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev) if (chid < 0 || chid >= dev_priv->engine.fifo.channels) return 0; - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (!chan) { NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); return -EINVAL; @@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev) static void nv04_fifo_init_intr(struct drm_device *dev) { + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, 0x002100, 0xffffffff); nv_wr32(dev, 0x002140, 0xffffffff); } @@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev) pfifo->reassign(dev, true); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - if (dev_priv->fifos[i]) { + if (dev_priv->channels.ptr[i]) { uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); } @@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev) return 0; } +void +nv04_fifo_fini(struct drm_device *dev) +{ + nv_wr32(dev, 0x2140, 0x00000000); + nouveau_irq_unregister(dev, 8); +} + +static bool +nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + struct nouveau_gpuobj *obj; + unsigned long flags; + const int subc = (addr >> 13) & 0x7; + const int mthd = addr & 0x1ffc; + bool handled = false; + u32 engine; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) + chan = dev_priv->channels.ptr[chid]; + if (unlikely(!chan)) + goto out; + + switch (mthd) { + case 0x0000: /* bind object to subchannel */ + obj = nouveau_ramht_find(chan, data); + if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) + break; + + chan->sw_subchannel[subc] = obj->class; + engine = 
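/* engine-selection nibble for this subchannel; cleared below so its methods route to software */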
0x0000000f << (subc * 4); + + nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); + handled = true; + break; + default: + engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE); + if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) + break; + + if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], + mthd, data)) + handled = true; + break; + } + +out: + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return handled; +} + +void +nv04_fifo_isr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + uint32_t status, reassign; + int cnt = 0; + + reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; + while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { + uint32_t chid, get; + + nv_wr32(dev, NV03_PFIFO_CACHES, 0); + + chid = engine->fifo.channel_id(dev); + get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); + + if (status & NV_PFIFO_INTR_CACHE_ERROR) { + uint32_t mthd, data; + int ptr; + + /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before + * wrapping on my G80 chips, but CACHE1 isn't big + * enough for this much data.. Tests show that it + * wraps around to the start at GET=0x800.. No clue + * as to why.. + */ + ptr = (get & 0x7ff) >> 2; + + if (dev_priv->card_type < NV_40) { + mthd = nv_rd32(dev, + NV04_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(dev, + NV04_PFIFO_CACHE1_DATA(ptr)); + } else { + mthd = nv_rd32(dev, + NV40_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(dev, + NV40_PFIFO_CACHE1_DATA(ptr)); + } + + if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) { + NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " + "Mthd 0x%04x Data 0x%08x\n", + chid, (mthd >> 13) & 7, mthd & 0x1ffc, + data); + } + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_CACHE_ERROR); + + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); + nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, + nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); + + status &= ~NV_PFIFO_INTR_CACHE_ERROR; + } + + if (status & NV_PFIFO_INTR_DMA_PUSHER) { + u32 dma_get = nv_rd32(dev, 0x003244); + u32 dma_put = nv_rd32(dev, 0x003240); + u32 push = nv_rd32(dev, 0x003220); + u32 state = nv_rd32(dev, 0x003228); + + if (dev_priv->card_type == NV_50) { + u32 ho_get = nv_rd32(dev, 0x003328); + u32 ho_put = nv_rd32(dev, 0x003320); + u32 ib_get = nv_rd32(dev, 0x003334); + u32 ib_put = nv_rd32(dev, 0x003330); + + if (nouveau_ratelimit()) + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " + "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " + "State 0x%08x Push 0x%08x\n", + chid, ho_get, dma_get, ho_put, + dma_put, ib_get, ib_put, state, + push); + + /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ + nv_wr32(dev, 0x003364, 0x00000000); + if (dma_get != dma_put || ho_get != ho_put) { + nv_wr32(dev, 0x003244, dma_put); + nv_wr32(dev, 0x003328, ho_put); + } else + if (ib_get != ib_put) { + nv_wr32(dev, 0x003334, ib_put); + } + } else { + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " + "Put 0x%08x State 0x%08x Push 0x%08x\n", + chid, dma_get, dma_put, state, push); + + if (dma_get != dma_put) + nv_wr32(dev, 0x003244, dma_put); + } + + nv_wr32(dev, 0x003228, 0x00000000); + nv_wr32(dev, 0x003220, 0x00000001); + nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); + 
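/* GET has already been skipped past the faulting method above, so acking the interrupt here does not immediately re-trigger it */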
status &= ~NV_PFIFO_INTR_DMA_PUSHER; + } + + if (status & NV_PFIFO_INTR_SEMAPHORE) { + uint32_t sem; + + status &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_SEMAPHORE); + + sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); + } + + if (dev_priv->card_type == NV_50) { + if (status & 0x00000010) { + nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); + status &= ~0x00000010; + nv_wr32(dev, 0x002100, 0x00000010); + } + } + + if (status) { + if (nouveau_ratelimit()) + NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", + status, chid); + nv_wr32(dev, NV03_PFIFO_INTR_0, status); + status = 0; + } + + nv_wr32(dev, NV03_PFIFO_CACHES, reassign); + } + + if (status) { + NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); + nv_wr32(dev, 0x2140, 0); + nv_wr32(dev, 0x140, 0); + } + + nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); +} diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index c8973421b635..af75015068d6 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -26,6 +26,11 @@ #include "drm.h" #include "nouveau_drm.h" #include "nouveau_drv.h" +#include "nouveau_hw.h" +#include "nouveau_util.h" + +static int nv04_graph_register(struct drm_device *dev); +static void nv04_graph_isr(struct drm_device *dev); static uint32_t nv04_graph_ctx_regs[] = { 0x0040053c, @@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev) if (chid >= dev_priv->engine.fifo.channels) return NULL; - return dev_priv->fifos[chid]; + return dev_priv->channels.ptr[chid]; } -void +static void nv04_graph_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev) struct nouveau_channel *chan = NULL; int chid; - pgraph->fifo_access(dev, false); nouveau_wait_for_idle(dev); /* If previous context is valid, we need to save it */ @@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev) /* Load context for next channel */ chid = dev_priv->engine.fifo.channel_id(dev); - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (chan) nv04_graph_load_context(chan); - - pgraph->fifo_access(dev, true); } static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) @@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan) void nv04_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct graph_state *pgraph_ctx = chan->pgraph_ctx; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + /* Free the context resources */ kfree(pgraph_ctx); chan->pgraph_ctx = NULL; + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); } int nv04_graph_load_context(struct nouveau_channel *chan) @@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t tmp; + int ret; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); 
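/* pulse the PGRAPH bit in PMC_ENABLE to reset the engine before reprogramming it */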
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + ret = nv04_graph_register(dev); + if (ret) + return ret; + /* Enable PGRAPH interrupts */ + nouveau_irq_register(dev, 12, nv04_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev) void nv04_graph_takedown(struct drm_device *dev) { + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); + nouveau_irq_unregister(dev, 12); } void @@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled) } static int -nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_set_ref(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { atomic_set(&chan->fence.last_sequence_irq, data); return 0; } +int +nv04_graph_mthd_page_flip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) +{ + struct drm_device *dev = chan->dev; + struct nouveau_page_flip_state s; + + if (!nouveau_finish_page_flip(chan, &s)) + nv_set_crtc_base(dev, s.crtc, + s.offset + s.y * s.pitch + s.x * s.bpp / 8); + + return 0; +} + /* * Software methods, why they are needed, and how they all work: * @@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, */ static void -nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) +nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value) { struct drm_device *dev = chan->dev; - uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; + u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; - uint32_t tmp; + u32 tmp; tmp = nv_ri32(dev, instance); tmp &= ~mask; @@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) } static void -nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) +nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value) { struct drm_device *dev = chan->dev; - uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; - uint32_t tmp, ctx1; + u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; + u32 tmp, ctx1; int class, op, valid = 1; ctx1 = nv_ri32(dev, instance); @@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val } static int -nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_set_operation(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { if (data > 5) return 1; /* Old versions of the objects only accept first three operations. 
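Hence the extra class check below: classes under 0x40 reject any operation past 2.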
*/ - if (data > 2 && grclass < 0x40) + if (data > 2 && class < 0x40) return 1; nv04_graph_set_ctx1(chan, 0x00038000, data << 15); /* changing operation changes set of objects needed for validation */ @@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { uint32_t min = data & 0xffff, max; uint32_t w = data >> 16; @@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { uint32_t min = data & 0xffff, max; uint32_t w = data >> 16; @@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, } static int 
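/* The bind_* helpers below all share one shape: fetch the bound object's class
 * byte from RAMIN via nv_ri32(dev, data << 4); class 0x30 (the null object)
 * clears the matching context bits, the expected class sets them, and anything
 * else fails validation. */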
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, return 1; } -static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { - { 0x0150, nv04_graph_mthd_set_ref }, - {} -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = { - { 0x0184, nv04_graph_mthd_bind_nv01_patt }, - { 0x0188, nv04_graph_mthd_bind_rop }, - { 0x018c, nv04_graph_mthd_bind_beta1 }, - { 0x0190, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = { - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_clip }, - { 0x018c, nv04_graph_mthd_bind_nv01_patt }, - { 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, nv04_graph_mthd_bind_surf_dst }, - { 0x019c, nv04_graph_mthd_bind_surf_src }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_clip 
}, - { 0x018c, nv04_graph_mthd_bind_nv04_patt }, - { 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, nv04_graph_mthd_bind_beta4 }, - { 0x019c, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = { - { 0x0188, nv04_graph_mthd_bind_chroma }, - { 0x018c, nv04_graph_mthd_bind_clip }, - { 0x0190, nv04_graph_mthd_bind_nv04_patt }, - { 0x0194, nv04_graph_mthd_bind_rop }, - { 0x0198, nv04_graph_mthd_bind_beta1 }, - { 0x019c, nv04_graph_mthd_bind_beta4 }, - { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, - { 0x03e4, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_clip }, - { 0x018c, nv04_graph_mthd_bind_nv01_patt }, - { 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_nv01_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = { - { 0x0188, nv04_graph_mthd_bind_nv01_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x0304, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = { - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf }, - { 0x0304, nv04_graph_mthd_set_operation }, - {}, -}; +static int +nv04_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { - { 0x0184, nv04_graph_mthd_bind_clip }, - { 0x0188, nv04_graph_mthd_bind_nv01_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; + if (dev_priv->engine.graph.registered) + return 0; -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { - { 0x0184, nv04_graph_mthd_bind_clip }, - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, + /* dvd subpicture */ + NVOBJ_CLASS(dev, 0x0038, GR); + + /* m2mf */ + NVOBJ_CLASS(dev, 0x0039, GR); + + /* nv03 gdirect */ + NVOBJ_CLASS(dev, 
0x004b, GR); + NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 gdirect */ + NVOBJ_CLASS(dev, 0x004a, GR); + NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv01 imageblit */ + NVOBJ_CLASS(dev, 0x001f, GR); + NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src); + NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 imageblit */ + NVOBJ_CLASS(dev, 0x005f, GR); + NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 iifc */ + NVOBJ_CLASS(dev, 0x0060, GR); + NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf); + NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation); + + /* nv05 iifc */ + NVOBJ_CLASS(dev, 0x0064, GR); + + /* nv01 ifc */ + NVOBJ_CLASS(dev, 0x0021, GR); + NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 ifc */ + NVOBJ_CLASS(dev, 0x0061, GR); + NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0061, 0x019c, 
nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv05 ifc */ + NVOBJ_CLASS(dev, 0x0065, GR); + + /* nv03 sifc */ + NVOBJ_CLASS(dev, 0x0036, GR); + NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 sifc */ + NVOBJ_CLASS(dev, 0x0076, GR); + NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv05 sifc */ + NVOBJ_CLASS(dev, 0x0066, GR); + + /* nv03 sifm */ + NVOBJ_CLASS(dev, 0x0037, GR); + NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation); + + /* nv04 sifm */ + NVOBJ_CLASS(dev, 0x0077, GR); + NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf); + NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation); + + /* null */ + NVOBJ_CLASS(dev, 0x0030, GR); + + /* surf2d */ + NVOBJ_CLASS(dev, 0x0042, GR); + + /* rop */ + NVOBJ_CLASS(dev, 0x0043, GR); + + /* beta1 */ + NVOBJ_CLASS(dev, 0x0012, GR); + + /* beta4 */ + NVOBJ_CLASS(dev, 0x0072, GR); + + /* cliprect */ + NVOBJ_CLASS(dev, 0x0019, GR); + + /* nv01 pattern */ + NVOBJ_CLASS(dev, 0x0018, GR); + + /* nv04 pattern */ + NVOBJ_CLASS(dev, 0x0044, GR); + + /* swzsurf */ + NVOBJ_CLASS(dev, 0x0052, GR); + + /* surf3d */ + NVOBJ_CLASS(dev, 0x0053, GR); + NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h); + NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v); + + /* nv03 tex_tri */ + NVOBJ_CLASS(dev, 0x0048, GR); + NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color); + NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta); + + /* tex_tri */ + NVOBJ_CLASS(dev, 0x0054, GR); + + /* multitex_tri */ + NVOBJ_CLASS(dev, 0x0055, GR); + + /* nv01 chroma */ + NVOBJ_CLASS(dev, 0x0017, GR); + + /* nv04 chroma */ + NVOBJ_CLASS(dev, 0x0057, GR); + + /* surf_dst */ + NVOBJ_CLASS(dev, 0x0058, GR); + + /* surf_src */ + NVOBJ_CLASS(dev, 0x0059, GR); + + /* surf_color */ + NVOBJ_CLASS(dev, 0x005a, GR); + + /* surf_zeta */ + NVOBJ_CLASS(dev, 0x005b, GR); + + /* nv01 line */ + NVOBJ_CLASS(dev, 0x001c, GR); + NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 
0x001c, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 line */ + NVOBJ_CLASS(dev, 0x005c, GR); + NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv01 tri */ + NVOBJ_CLASS(dev, 0x001d, GR); + NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 tri */ + NVOBJ_CLASS(dev, 0x005d, GR); + NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv01 rect */ + NVOBJ_CLASS(dev, 0x001e, GR); + NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 rect */ + NVOBJ_CLASS(dev, 0x005e, GR); + NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation); + + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + + dev_priv->engine.graph.registered = true; + return 0; }; -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { - { 0x0188, nv04_graph_mthd_bind_clip }, - { 0x018c, nv04_graph_mthd_bind_surf_color }, - { 0x0190, nv04_graph_mthd_bind_surf_zeta }, - {}, +static struct nouveau_bitfield nv04_graph_intr[] = { + { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, + {} }; -static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { - { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, - { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, - {}, +static struct nouveau_bitfield nv04_graph_nstatus[] = +{ + { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { 
NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} }; -struct nouveau_pgraph_object_class nv04_graph_grclass[] = { - { 0x0038, false, NULL }, /* dvd subpicture */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ - { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ - { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ - { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ - { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ - { 0x0064, false, NULL }, /* nv05 iifc */ - { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ - { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ - { 0x0065, false, NULL }, /* nv05 ifc */ - { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ - { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ - { 0x0066, false, NULL }, /* nv05 sifc */ - { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ - { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ - { 0x0030, false, NULL }, /* null */ - { 0x0042, false, NULL }, /* surf2d */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0018, false, NULL }, /* nv01 pattern */ - { 0x0044, false, NULL }, /* nv04 pattern */ - { 0x0052, false, NULL }, /* swzsurf */ - { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */ - { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ - { 0x0054, false, NULL }, /* tex_tri */ - { 0x0055, false, NULL }, /* multitex_tri */ - { 0x0017, false, NULL }, /* nv01 chroma */ - { 0x0057, false, NULL }, /* nv04 chroma */ - { 0x0058, false, NULL }, /* surf_dst */ - { 0x0059, false, NULL }, /* surf_src */ - { 0x005a, false, NULL }, /* surf_color */ - { 0x005b, false, NULL }, /* surf_zeta */ - { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */ - { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */ - { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ - { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ - { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ - { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ - { 0x506e, true, nv04_graph_mthds_sw }, +struct nouveau_bitfield nv04_graph_nsource[] = +{ + { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, + { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, + { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, + { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, + { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, + { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, + { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, + { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, + { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, + { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, {} }; 
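/* The nouveau_bitfield tables above feed nouveau_bitfield_print(), a small
 * decoder (defined elsewhere in the driver) that the ISRs below use to turn a
 * raw status word into readable flag names. A minimal standalone sketch of the
 * idea follows; the type, function and bit values here are illustrative
 * stand-ins, not the driver's real definitions. */

#include <stdio.h>
#include <stdint.h>

struct bitfield {
	uint32_t value;		/* mirrors the { BIT, "NAME" } pairs above */
	const char *name;
};

static const struct bitfield pgraph_intr_bits[] = {
	{ 0x00000001, "NOTIFY" },	/* illustrative values only */
	{ 0x00100000, "ERROR" },
	{ 0, NULL }			/* sentinel, like the {} terminator above */
};

/* print a name for every known set bit, then whatever is left over in hex */
static void bitfield_print(const struct bitfield *bf, uint32_t value)
{
	for (; bf->name; bf++) {
		if (value & bf->value) {
			printf(" %s", bf->name);
			value &= ~bf->value;
		}
	}
	if (value)
		printf(" (unknown 0x%08x)", value);
}

int main(void)
{
	bitfield_print(pgraph_intr_bits, 0x00100001);	/* " NOTIFY ERROR" */
	printf("\n");
	return 0;
}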
+static void +nv04_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x0f000000) >> 24; + u32 subc = (addr & 0x0000e000) >> 13; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff; + u32 show = stat; + + if (stat & NV_PGRAPH_INTR_NOTIFY) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_NOTIFY; + } + } + + if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); + stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + nv04_graph_context_switch(dev); + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv04_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv04_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " + "mthd 0x%04x data 0x%08x\n", + chid, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 0b5ae297abde..b8e3edb5c063 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev) } int -nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - uint32_t *sz) +nv04_instmem_suspend(struct drm_device *dev) { return 0; } void -nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) -{ -} - -int -nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv04_instmem_resume(struct drm_device *dev) { - return 0; } int -nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_mm_node *ramin = NULL; + + do { + if (drm_mm_pre_get(&dev_priv->ramin_heap)) + return -ENOMEM; + + spin_lock(&dev_priv->ramin_lock); + ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0); + if (ramin == NULL) { + spin_unlock(&dev_priv->ramin_lock); + return -ENOMEM; + } + + ramin = drm_mm_get_block_atomic(ramin, size, align); + spin_unlock(&dev_priv->ramin_lock); + } while (ramin == NULL); + + gpuobj->node = ramin; + gpuobj->vinst = ramin->start; return 0; } void -nv04_instmem_flush(struct drm_device *dev) +nv04_instmem_put(struct nouveau_gpuobj *gpuobj) { + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(gpuobj->node); + gpuobj->node = NULL; + spin_unlock(&dev_priv->ramin_lock); } int -nv04_instmem_suspend(struct drm_device *dev) +nv04_instmem_map(struct nouveau_gpuobj *gpuobj) { + gpuobj->pinst = gpuobj->vinst; return 0; } void -nv04_instmem_resume(struct drm_device *dev) +nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj) { } +void +nv04_instmem_flush(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c index 
cc5cda44e501..f78181a59b4a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fb.c +++ b/drivers/gpu/drm/nouveau/nv10_fb.c @@ -3,23 +3,109 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" +static struct drm_mm_node * +nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct drm_mm_node *mem; + int ret; + + ret = drm_mm_pre_get(&pfb->tag_heap); + if (ret) + return NULL; + + spin_lock(&dev_priv->tile.lock); + mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0); + if (mem) + mem = drm_mm_get_block_atomic(mem, size, 0); + spin_unlock(&dev_priv->tile.lock); + + return mem; +} + +static void +nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + spin_lock(&dev_priv->tile.lock); + drm_mm_put_block(mem); + spin_unlock(&dev_priv->tile.lock); +} + +void +nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, + uint32_t size, uint32_t pitch, uint32_t flags) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16); + + tile->addr = addr; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; + + if (dev_priv->card_type == NV_20) { + if (flags & NOUVEAU_GEM_TILE_ZETA) { + /* + * Allocate some of the on-die tag memory, + * used to store Z compression meta-data (most + * likely just a bitmap determining if a given + * tile is compressed or not). + */ + tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256); + + if (tile->tag_mem) { + /* Enable Z compression */ + if (dev_priv->chipset >= 0x25) + tile->zcomp = tile->tag_mem->start | + (bpp == 16 ? + NV25_PFB_ZCOMP_MODE_16 : + NV25_PFB_ZCOMP_MODE_32); + else + tile->zcomp = tile->tag_mem->start | + NV20_PFB_ZCOMP_EN | + (bpp == 16 ? 0 : + NV20_PFB_ZCOMP_MODE_32); + } + + tile->addr |= 3; + } else { + tile->addr |= 1; + } + + } else { + tile->addr |= 1 << 31; + } +} + void -nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv10_fb_free_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t limit = max(1u, addr + size) - 1; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - if (pitch) { - if (dev_priv->card_type >= NV_20) - addr |= 1; - else - addr |= 1 << 31; + if (tile->tag_mem) { + nv20_fb_free_tag(dev, tile->tag_mem); + tile->tag_mem = NULL; } - nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); - nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); - nv_wr32(dev, NV10_PFB_TILE(i), addr); + tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; +} + +void +nv10_fb_set_tile_region(struct drm_device *dev, int i) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); + nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); + nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); + + if (dev_priv->card_type == NV_20) + nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp); } int @@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev) pfb->num_tiles = NV10_PFB_TILE__SIZE; + if (dev_priv->card_type == NV_20) + drm_mm_init(&pfb->tag_heap, 0, + (dev_priv->chipset >= 0x25 ? + 64 * 1024 : 32 * 1024)); + /* Turn all the tiling regions off. 
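The shared dev_priv->tile.reg[] state is still zeroed at this point, so writing it out programs every region as disabled.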
*/ for (i = 0; i < pfb->num_tiles; i++) - pfb->set_region_tiling(dev, i, 0, 0, 0); + pfb->set_tile_region(dev, i); return 0; } @@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev) void nv10_fb_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + int i; + + for (i = 0; i < pfb->num_tiles; i++) + pfb->free_tile_region(dev, i); + + if (dev_priv->card_type == NV_20) + drm_mm_takedown(&pfb->tag_heap); } diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index f1b03ad58fd5..d2ecbff4bee1 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV03_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + /* Fill entries that are seen filled in dumps of nvidia driver just * after channel's is put into DMA mode */ @@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) return 0; } -void -nv10_fifo_destroy_context(struct nouveau_channel *chan) -{ - struct drm_device *dev = chan->dev; - - nv_wr32(dev, NV04_PFIFO_MODE, - nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - - nouveau_gpuobj_ref(NULL, &chan->ramfc); -} - static void nv10_fifo_do_load_context(struct drm_device *dev, int chid) { @@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev) static void nv10_fifo_init_intr(struct drm_device *dev) { + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, 0x002100, 0xffffffff); nv_wr32(dev, 0x002140, 0xffffffff); } @@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev) pfifo->reassign(dev, true); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - if (dev_priv->fifos[i]) { + if (dev_priv->channels.ptr[i]) { uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); } diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 8e68c9731159..8c92edb7bbcd 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -26,6 +26,10 @@ #include "drm.h" #include "nouveau_drm.h" #include "nouveau_drv.h" +#include "nouveau_util.h" + +static int nv10_graph_register(struct drm_device *); +static void nv10_graph_isr(struct drm_device *); #define NV10_FIFO_NUMBER 32 @@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev) return 0; } -void +static void nv10_graph_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_channel *chan = NULL; int chid; - pgraph->fifo_access(dev, false); nouveau_wait_for_idle(dev); /* If previous context is valid, we need to save it */ @@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev) /* Load context for next channel */ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (chan && chan->pgraph_ctx) nv10_graph_load_context(chan); - - pgraph->fifo_access(dev, true); } #define NV_WRITE_CTX(reg, val) do { \ @@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev) if (chid >= dev_priv->engine.fifo.channels) return NULL; - return dev_priv->fifos[chid]; + return dev_priv->channels.ptr[chid]; } int nv10_graph_create_context(struct nouveau_channel *chan) @@ -875,37 +875,54 
@@ int nv10_graph_create_context(struct nouveau_channel *chan) void nv10_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct graph_state *pgraph_ctx = chan->pgraph_ctx; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + /* Free the context resources */ kfree(pgraph_ctx); chan->pgraph_ctx = NULL; + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); } void -nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv10_graph_set_tile_region(struct drm_device *dev, int i) { - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1 << 31; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); + nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr); } int nv10_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t tmp; - int i; + int ret, i; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + ret = nv10_graph_register(dev); + if (ret) + return ret; + + nouveau_irq_register(dev, 12, nv10_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. 
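PGRAPH keeps its own copy of the PFB tile configuration; this writes the same per-region state (still zeroed during init) into the PGRAPH tile registers.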
*/ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) - nv10_graph_set_region_tiling(dev, i, 0, 0, 0); + nv10_graph_set_tile_region(dev, i); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); @@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev) void nv10_graph_takedown(struct drm_device *dev) { + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); + nouveau_irq_unregister(dev, 12); } static int -nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv17_graph_mthd_lma_window(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { struct drm_device *dev = chan->dev; struct graph_state *ctx = chan->pgraph_ctx; struct pipe_state *pipe = &ctx->pipe_state; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; uint32_t xfmode0, xfmode1; int i; @@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, nouveau_wait_for_idle(dev); - pgraph->fifo_access(dev, true); - return 0; } static int -nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; nouveau_wait_for_idle(dev); @@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, nv_wr32(dev, 0x004006b0, nv_rd32(dev, 0x004006b0) | 0x8 << 24); - pgraph->fifo_access(dev, true); + return 0; +} + +static int +nv10_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */ + NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */ + NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */ + NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */ + + /* celcius */ + if (dev_priv->chipset <= 0x10) { + NVOBJ_CLASS(dev, 0x0056, GR); + } else + if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) { + NVOBJ_CLASS(dev, 0x0096, GR); + } else { + NVOBJ_CLASS(dev, 0x0099, GR); + NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable); + } + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + + dev_priv->engine.graph.registered = true; return 0; } -static struct nouveau_pgraph_object_method 
nv17_graph_celsius_mthds[] = { - { 0x1638, nv17_graph_mthd_lma_window }, - { 0x163c, nv17_graph_mthd_lma_window }, - { 0x1640, nv17_graph_mthd_lma_window }, - { 0x1644, nv17_graph_mthd_lma_window }, - { 0x1658, nv17_graph_mthd_lma_enable }, +struct nouveau_bitfield nv10_graph_intr[] = { + { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, + { NV_PGRAPH_INTR_ERROR, "ERROR" }, {} }; -struct nouveau_pgraph_object_class nv10_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x005f, false, NULL }, /* imageblit */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x0089, false, NULL }, /* sifm */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x0052, false, NULL }, /* swzsurf */ - { 0x0093, false, NULL }, /* surf3d */ - { 0x0094, false, NULL }, /* tex_tri */ - { 0x0095, false, NULL }, /* multitex_tri */ - { 0x0056, false, NULL }, /* celcius (nv10) */ - { 0x0096, false, NULL }, /* celcius (nv11) */ - { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */ +struct nouveau_bitfield nv10_graph_nstatus[] = +{ + { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, {} }; + +static void +nv10_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x01f00000) >> 20; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; + u32 show = stat; + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + } + } + + if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); + stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + nv10_graph_context_switch(dev); + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv10_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " + "mthd 0x%04x data 0x%08x\n", + chid, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 12ab9cd56eca..8464b76798d5 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -32,6 +32,10 @@ #define NV34_GRCTX_SIZE (18140) #define NV35_36_GRCTX_SIZE (22396) +static int nv20_graph_register(struct drm_device *); +static int nv30_graph_register(struct drm_device *); +static void nv20_graph_isr(struct drm_device *); + static void nv20_graph_context_init(struct drm_device *dev, 
struct nouveau_gpuobj *ctx) { @@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + unsigned long flags; - nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + /* Free the context resources */ nv_wo32(pgraph->ctx_table, chan->id * 4, 0); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } int @@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev) } void -nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv20_graph_set_tile_region(struct drm_device *dev, int i) { - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); + nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr); + + if (dev_priv->card_type == NV_20) { + nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp); + } } int @@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev) nv20_graph_rdi(dev); + ret = nv20_graph_register(dev); + if (ret) { + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); + return ret; + } + + nouveau_irq_register(dev, 12, nv20_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev) nv_wr32(dev, 0x40009C , 0x00000040); if (dev_priv->chipset >= 0x25) { - nv_wr32(dev, 0x400890, 0x00080000); + nv_wr32(dev, 0x400890, 0x00a8cfff); nv_wr32(dev, 0x400610, 0x304B1FB6); - nv_wr32(dev, 0x400B80, 0x18B82880); + nv_wr32(dev, 0x400B80, 0x1cbd3883); nv_wr32(dev, 0x400B84, 0x44000000); nv_wr32(dev, 0x400098, 0x40000080); nv_wr32(dev, 0x400B88, 0x000000ff); + } else { - nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */ + nv_wr32(dev, 0x400880, 0x0008c7df); nv_wr32(dev, 0x400094, 0x00000005); - nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */ + nv_wr32(dev, 0x400B80, 0x45eae20e); nv_wr32(dev, 0x400B84, 0x24000000); nv_wr32(dev, 0x400098, 0x00000040); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); @@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. 
*/ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) - nv20_graph_set_region_tiling(dev, i, 0, 0, 0); + nv20_graph_set_tile_region(dev, i); - for (i = 0; i < 8; i++) { - nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); - nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, - nv_rd32(dev, 0x100300 + i * 4)); - } nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); @@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); + nouveau_irq_unregister(dev, 12); + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); } @@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev) return ret; } + ret = nv30_graph_register(dev); + if (ret) { + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); + return ret; + } + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctx_table->pinst >> 4); + nouveau_irq_register(dev, 12, nv20_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. */ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) - nv20_graph_set_region_tiling(dev, i, 0, 0, 0); + nv20_graph_set_tile_region(dev, i); nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); @@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev) return 0; } -struct nouveau_pgraph_object_class nv20_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x0089, false, NULL }, /* sifm */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x009e, false, NULL }, /* swzsurf */ - { 0x0096, false, NULL }, /* celcius */ - { 0x0097, false, NULL }, /* kelvin (nv20) */ - { 0x0597, false, NULL }, /* kelvin (nv25) */ - {} -}; - -struct nouveau_pgraph_object_class nv30_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x038a, false, NULL }, /* ifc (nv30) */ - { 0x0089, false, NULL }, /* sifm */ - { 0x0389, false, NULL }, /* sifm (nv30) */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x0362, false, NULL }, /* surf2d (nv30) */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x039e, false, NULL }, /* swzsurf */ - { 0x0397, false, NULL }, /* rankine (nv30) */ - { 0x0497, false, NULL }, /* rankine (nv35) */ - { 0x0697, false, NULL }, /* rankine (nv34) */ - {} -}; +static int +nv20_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + 
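/* The NVOBJ_CLASS()/NVOBJ_MTHD() calls in these *_register() functions
 * replace the static nouveau_pgraph_object_class tables that this patch
 * deletes. As a rough sketch -- the real macro lives in nouveau_drv.h in
 * this series, so treat the expansion below as an assumption rather than
 * the literal code -- NVOBJ_CLASS(dev, c, GR) behaves like:
 *
 *	ret = nouveau_gpuobj_class_new(dev, c, NVOBJ_ENGINE_GR);
 *	if (ret)
 *		return ret;
 *
 * Each object class is registered against an engine once at init time,
 * and NVOBJ_MTHD() attaches software methods (such as the 0x0500
 * page-flip method on class 0x506e) that the new PGRAPH ISRs dispatch
 * through nouveau_gpuobj_mthd_call2() on ILLEGAL_MTHD interrupts. */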
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */ + NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */ + + /* kelvin */ + if (dev_priv->chipset < 0x25) + NVOBJ_CLASS(dev, 0x0097, GR); + else + NVOBJ_CLASS(dev, 0x0597, GR); + + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + + dev_priv->engine.graph.registered = true; + return 0; +} + +static int +nv30_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */ + + /* rankine */ + if (0x00000003 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x0397, GR); + else + if (0x00000010 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x0697, GR); + else + if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x0497, GR); + + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + + dev_priv->engine.graph.registered = true; + return 0; +} + +static void +nv20_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x01f00000) >> 20; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; + u32 show = stat; + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + } + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv10_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " + "mthd 0x%04x data 0x%08x\n", + chid, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c 
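Across the PGRAPH hunks above and the nv30_fb.c/nv40_fb.c hunks below, the old set_region_tiling(dev, i, addr, size, pitch) hooks are split in two: init_tile_region() computes and caches per-region state in dev_priv->tile.reg[i], while each engine's set_tile_region() only copies that cached state into its own registers. A minimal sketch of the shape, with the struct layout inferred from the fields this patch uses (the helper name and exact types here are illustrative, not the driver's):

#include <linux/kernel.h>	/* max() */
#include <linux/types.h>

struct nouveau_tile_reg {
	uint32_t addr;	/* region base; low bit marks the region valid */
	uint32_t limit;	/* inclusive end of the region */
	uint32_t pitch;
	uint32_t zcomp;	/* NV20 z-compression word */
};

static void
example_fill_tile_region(struct nouveau_tile_reg *tile,
			 uint32_t addr, uint32_t size, uint32_t pitch)
{
	tile->addr  = addr | 1;			/* mark valid */
	tile->limit = max(1u, addr + size) - 1;	/* as in nv30_fb_init_tile_region() */
	tile->pitch = pitch;
}

With the state cached, the "turn all the tiling regions off" loops can simply replay tile.reg[i] (zeroed by free_tile_region()) instead of passing explicit zeroes to every engine.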
index 4a3f2f095128..e0135f0e2144 100644 --- a/drivers/gpu/drm/nouveau/nv30_fb.c +++ b/drivers/gpu/drm/nouveau/nv30_fb.c @@ -29,6 +29,27 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" +void +nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, + uint32_t size, uint32_t pitch, uint32_t flags) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + tile->addr = addr | 1; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; +} + +void +nv30_fb_free_tile_region(struct drm_device *dev, int i) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + tile->addr = tile->limit = tile->pitch = 0; +} + static int calc_bias(struct drm_device *dev, int k, int i, int j) { @@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev) /* Turn all the tiling regions off. */ for (i = 0; i < pfb->num_tiles; i++) - pfb->set_region_tiling(dev, i, 0, 0, 0); + pfb->set_tile_region(dev, i); /* Init the memory timing regs at 0x10037c/0x1003ac */ if (dev_priv->chipset == 0x30 || diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c index 3cd07d8d5bd7..f3d9c0505f7b 100644 --- a/drivers/gpu/drm/nouveau/nv40_fb.c +++ b/drivers/gpu/drm/nouveau/nv40_fb.c @@ -4,26 +4,22 @@ #include "nouveau_drm.h" void -nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv40_fb_set_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; switch (dev_priv->chipset) { case 0x40: - nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); - nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); - nv_wr32(dev, NV10_PFB_TILE(i), addr); + nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); + nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); + nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); break; default: - nv_wr32(dev, NV40_PFB_TLIMIT(i), limit); - nv_wr32(dev, NV40_PFB_TSIZE(i), pitch); - nv_wr32(dev, NV40_PFB_TILE(i), addr); + nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit); + nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch); + nv_wr32(dev, NV40_PFB_TILE(i), tile->addr); break; } } @@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev) /* Turn all the tiling regions off. 
*/ for (i = 0; i < pfb->num_tiles; i++) - pfb->set_region_tiling(dev, i, 0, 0, 0); + pfb->set_tile_region(dev, i); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index d337b8b28cdd..c86e4d4e9b96 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV40_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); nv_wi32(dev, fc + 0, chan->pushbuf_base); @@ -70,17 +75,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) return 0; } -void -nv40_fifo_destroy_context(struct nouveau_channel *chan) -{ - struct drm_device *dev = chan->dev; - - nv_wr32(dev, NV04_PFIFO_MODE, - nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - - nouveau_gpuobj_ref(NULL, &chan->ramfc); -} - static void nv40_fifo_do_load_context(struct drm_device *dev, int chid) { @@ -279,6 +273,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) static void nv40_fifo_init_intr(struct drm_device *dev) { + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, 0x002100, 0xffffffff); nv_wr32(dev, 0x002140, 0xffffffff); } @@ -301,7 +296,7 @@ nv40_fifo_init(struct drm_device *dev) pfifo->reassign(dev, true); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - if (dev_priv->fifos[i]) { + if (dev_priv->channels.ptr[i]) { uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 7ee1b91569b8..0618846a97ce 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -29,6 +29,9 @@ #include "nouveau_drv.h" #include "nouveau_grctx.h" +static int nv40_graph_register(struct drm_device *); +static void nv40_graph_isr(struct drm_device *); + struct nouveau_channel * nv40_graph_channel(struct drm_device *dev) { @@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev) inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; + struct nouveau_channel *chan = dev_priv->channels.ptr[i]; if (chan && chan->ramin_grctx && chan->ramin_grctx->pinst == inst) @@ -79,6 +82,22 @@ nv40_graph_create_context(struct nouveau_channel *chan) void nv40_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + /* Free the context resources */ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } @@ -174,43 +193,39 @@ nv40_graph_unload_context(struct drm_device *dev) } void -nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv40_graph_set_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1; + struct 
nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; switch (dev_priv->chipset) { case 0x44: case 0x4a: case 0x4e: - nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); + nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); break; case 0x46: case 0x47: case 0x49: case 0x4b: - nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV47_PGRAPH_TILE(i), addr); - nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); - nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); - nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); + nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); break; default: - nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); - nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); - nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); - nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); + nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); break; } } @@ -232,7 +247,7 @@ nv40_graph_init(struct drm_device *dev) struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_grctx ctx = {}; uint32_t vramsz, *cp; - int i, j; + int ret, i, j; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); @@ -256,9 +271,14 @@ nv40_graph_init(struct drm_device *dev) kfree(cp); + ret = nv40_graph_register(dev); + if (ret) + return ret; + /* No context present currently */ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); + nouveau_irq_register(dev, 12, nv40_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -347,7 +367,7 @@ nv40_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. 
*/ for (i = 0; i < pfb->num_tiles; i++) - nv40_graph_set_region_tiling(dev, i, 0, 0, 0); + nv40_graph_set_tile_region(dev, i); /* begin RAM config */ vramsz = pci_resource_len(dev->pdev, 0) - 1; @@ -390,26 +410,111 @@ nv40_graph_init(struct drm_device *dev) void nv40_graph_takedown(struct drm_device *dev) { + nouveau_irq_unregister(dev, 12); } -struct nouveau_pgraph_object_class nv40_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x0089, false, NULL }, /* sifm */ - { 0x3089, false, NULL }, /* sifm (nv40) */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x3062, false, NULL }, /* surf2d (nv40) */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x309e, false, NULL }, /* swzsurf */ - { 0x4097, false, NULL }, /* curie (nv40) */ - { 0x4497, false, NULL }, /* curie (nv44) */ - {} -}; +static int +nv40_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ + + /* curie */ + if (dev_priv->chipset >= 0x60 || + 0x00005450 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x4497, GR); + else + NVOBJ_CLASS(dev, 0x4097, GR); + + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + + dev_priv->engine.graph.registered = true; + return 0; +} + +static int +nv40_graph_isr_chid(struct drm_device *dev, u32 inst) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + unsigned long flags; + int i; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + chan = dev_priv->channels.ptr[i]; + if (!chan || !chan->ramin_grctx) + continue; + + if (inst == chan->ramin_grctx->pinst) + break; + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return i; +} + +static void +nv40_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4; + u32 chid = nv40_graph_isr_chid(dev, inst); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff; + u32 show = stat; + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & 
NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + } else + if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { + nv_mask(dev, 0x402000, 0, 0); + } + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv10_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d " + "class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 56476d0c6de8..2c346f797285 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -345,7 +345,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t buffer_handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_bo *cursor = NULL; struct drm_gem_object *gem; @@ -374,8 +373,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nouveau_bo_unmap(cursor); - nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset - - dev_priv->vm_vram_base); + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT); nv_crtc->cursor.show(nv_crtc, true); out: @@ -437,6 +435,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = { .cursor_move = nv50_crtc_cursor_move, .gamma_set = nv50_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, + .page_flip = nouveau_crtc_page_flip, .destroy = nv50_crtc_destroy, }; @@ -453,6 +452,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc) NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + drm_vblank_pre_modeset(dev, nv_crtc->index); nv50_crtc_blank(nv_crtc, true); } @@ -468,6 +468,7 @@ nv50_crtc_commit(struct drm_crtc *crtc) NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); nv50_crtc_blank(nv_crtc, false); + drm_vblank_post_modeset(dev, nv_crtc->index); ret = RING_SPACE(evo, 2); if (ret) { @@ -545,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, return -EINVAL; } - nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; + nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT; nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index f624c611ddea..7cc94ed9ed95 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -33,6 +33,8 @@ #include "nouveau_ramht.h" #include "drm_crtc_helper.h" +static void nv50_display_isr(struct drm_device *); + static inline int nv50_sor_nr(struct drm_device *dev) { @@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev) return 4; } -static void -nv50_evo_channel_del(struct nouveau_channel **pchan) -{ - struct nouveau_channel *chan = *pchan; - - if (!chan) - return; - *pchan = NULL; - - nouveau_gpuobj_channel_takedown(chan); - nouveau_bo_unmap(chan->pushbuf_bo); - nouveau_bo_ref(NULL, &chan->pushbuf_bo); - - if (chan->user) - iounmap(chan->user); - - kfree(chan); -} - 
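/* Note that nv50_evo_channel_del(), nv50_evo_dmaobj_new() and
 * nv50_evo_channel_new() are not dropped for good here: reworked
 * versions, with channel-id allocation and separate init/fini stages,
 * reappear in the new file drivers/gpu/drm/nouveau/nv50_evo.c added
 * further down in this same patch. */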
-static int -nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, - uint32_t tile_flags, uint32_t magic_flags, - uint32_t offset, uint32_t limit) -{ - struct drm_nouveau_private *dev_priv = evo->dev->dev_private; - struct drm_device *dev = evo->dev; - struct nouveau_gpuobj *obj = NULL; - int ret; - - ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj); - if (ret) - return ret; - obj->engine = NVOBJ_ENGINE_DISPLAY; - - nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); - nv_wo32(obj, 4, limit); - nv_wo32(obj, 8, offset); - nv_wo32(obj, 12, 0x00000000); - nv_wo32(obj, 16, 0x00000000); - if (dev_priv->card_type < NV_C0) - nv_wo32(obj, 20, 0x00010000); - else - nv_wo32(obj, 20, 0x00020000); - dev_priv->engine.instmem.flush(dev); - - ret = nouveau_ramht_insert(evo, name, obj); - nouveau_gpuobj_ref(NULL, &obj); - if (ret) { - return ret; - } - - return 0; -} - -static int -nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramht = NULL; - struct nouveau_channel *chan; - int ret; - - chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); - if (!chan) - return -ENOMEM; - *pchan = chan; - - chan->id = -1; - chan->dev = dev; - chan->user_get = 4; - chan->user_put = 0; - - ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); - if (ret) { - NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = drm_mm_init(&chan->ramin_heap, 0, 32768); - if (ret) { - NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); - if (ret) { - NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_ramht_new(dev, ramht, &chan->ramht); - nouveau_gpuobj_ref(NULL, &ramht); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - - if (dev_priv->chipset != 0x50) { - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, - 0, 0xffffffff); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - - - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19, - 0, 0xffffffff); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - } - - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, - 0, dev_priv->vram_size); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, - false, true, &chan->pushbuf_bo); - if (ret == 0) - ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM); - if (ret) { - NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_bo_map(chan->pushbuf_bo); - if (ret) { - NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - chan->user = ioremap(pci_resource_start(dev->pdev, 0) + - NV50_PDISPLAY_USER(0), PAGE_SIZE); - if (!chan->user) { - NV_ERROR(dev, "Error mapping EVO control regs.\n"); - nv50_evo_channel_del(pchan); - return -ENOMEM; - } - - return 0; -} - int nv50_display_early_init(struct drm_device *dev) { @@ -214,17 +63,16 @@ int nv50_display_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; struct 
nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; - struct nouveau_channel *evo = dev_priv->evo; struct drm_connector *connector; - uint32_t val, ram_amount; - uint64_t start; + struct nouveau_channel *evo; int ret, i; + u32 val; NV_DEBUG_KMS(dev, "\n"); nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); + /* * I think the 0x006101XX range is some kind of main control area * that enables things. @@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev) val = nv_rd32(dev, 0x0061610c + (i * 0x800)); nv_wr32(dev, 0x0061019c + (i * 0x10), val); } + /* DAC */ for (i = 0; i < 3; i++) { val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); nv_wr32(dev, 0x006101d0 + (i * 0x04), val); } + /* SOR */ for (i = 0; i < nv50_sor_nr(dev); i++) { val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); nv_wr32(dev, 0x006101e0 + (i * 0x04), val); } + /* EXT */ for (i = 0; i < 3; i++) { val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); @@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); } - /* This used to be in crtc unblank, but seems out of place there. */ - nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); - /* RAM is clamped to 256 MiB. */ - ram_amount = dev_priv->vram_size; - NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); - if (ram_amount > 256*1024*1024) - ram_amount = 256*1024*1024; - nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); - nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000); - nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0); - /* The precise purpose is unknown, i suspect it has something to do * with text mode. */ @@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev) } } - /* taken from nv bug #12637, attempts to un-wedge the hw if it's - * stuck in some unspecified state - */ - start = ptimer->read(dev); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00); - while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) { - if ((val & 0x9f0000) == 0x20000) - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - val | 0x800000); - - if ((val & 0x3f0000) == 0x30000) - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - val | 0x200000); - - if (ptimer->read(dev) - start > 1000000000ULL) { - NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", val); - return -EBUSY; - } - } - - nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); - if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - 0x40000000, 0x40000000)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); - return -EBUSY; - } - for (i = 0; i < 2; i++) { nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), @@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev) } } - nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); + nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); + nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000); + nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, + NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); + + /* enable hotplug interrupts */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct nouveau_connector *conn = nouveau_connector(connector); - /* initialise fifo */ - 
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), - ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | - NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | - NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002); - if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); - return -EBUSY; + if (conn->dcb->gpio_tag == 0xff) + continue; + + pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); } - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) | - NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); - nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 | - NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); - nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1); - - evo->dma.max = (4096/4) - 2; - evo->dma.put = 0; - evo->dma.cur = evo->dma.put; - evo->dma.free = evo->dma.max - evo->dma.cur; - - ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); + + ret = nv50_evo_init(dev); if (ret) return ret; + evo = dev_priv->evo; - for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) - OUT_RING(evo, 0); + nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); ret = RING_SPACE(evo, 11); if (ret) @@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev) if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) NV_ERROR(dev, "evo pushbuf stalled\n"); - /* enable clock change interrupts. */ - nv_wr32(dev, 0x610028, 0x00010001); - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 | - NV50_PDISPLAY_INTR_EN_CLK_UNK20 | - NV50_PDISPLAY_INTR_EN_CLK_UNK40)); - - /* enable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct nouveau_connector *conn = nouveau_connector(connector); - - if (conn->dcb->gpio_tag == 0xff) - continue; - - pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); - } return 0; } @@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev) } } - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); - nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0); - if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); - } + nv50_evo_fini(dev); for (i = 0; i < 3; i++) { if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), @@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev) } /* disable interrupts. 
*/ - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000); /* disable hotplug interrupts */ nv_wr32(dev, 0xe054, 0xffffffff); @@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev) dev->mode_config.fb_base = dev_priv->fb_phys; - /* Create EVO channel */ - ret = nv50_evo_channel_new(dev, &dev_priv->evo); - if (ret) { - NV_ERROR(dev, "Error creating EVO channel: %d\n", ret); - return ret; - } - /* Create CRTC objects */ for (i = 0; i < 2; i++) nv50_crtc_create(dev, i); @@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev) } } + INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); + nouveau_irq_register(dev, 26, nv50_display_isr); + ret = nv50_display_init(dev); if (ret) { nv50_display_destroy(dev); @@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev) void nv50_display_destroy(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - NV_DEBUG_KMS(dev, "\n"); drm_mode_config_cleanup(dev); nv50_display_disable(dev); - nv50_evo_channel_del(&dev_priv->evo); + nouveau_irq_unregister(dev, 26); } static u16 @@ -660,32 +434,32 @@ static void nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan; - struct list_head *entry, *tmp; + struct nouveau_channel *chan, *tmp; - list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) { - chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait); + list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting, + nvsw.vbl_wait) { + if (chan->nvsw.vblsem_head != crtc) + continue; nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, chan->nvsw.vblsem_rval); list_del(&chan->nvsw.vbl_wait); + drm_vblank_put(dev, crtc); } + + drm_handle_vblank(dev, crtc); } static void nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) { - intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; - if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) nv50_display_vblank_crtc_handler(dev, 0); if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) nv50_display_vblank_crtc_handler(dev, 1); - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, - NV50_PDISPLAY_INTR_EN) & ~intr); - nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr); + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC); } static void @@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work) static void nv50_display_error_handler(struct drm_device *dev) { - uint32_t addr, data; - - nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000); - addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR); - data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA); - - NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n", - 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); - - nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); -} - -void -nv50_display_irq_hotplug_bh(struct work_struct *work) -{ - struct drm_nouveau_private *dev_priv = - container_of(work, struct drm_nouveau_private, hpd_work); - struct drm_device *dev = dev_priv->dev; - struct drm_connector *connector; - const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; - uint32_t unplug_mask, plug_mask, change_mask; - uint32_t hpd0, hpd1; - - spin_lock_irq(&dev_priv->hpd_state.lock); - hpd0 = dev_priv->hpd_state.hpd0_bits; - dev_priv->hpd_state.hpd0_bits = 0; - hpd1 = dev_priv->hpd_state.hpd1_bits; - dev_priv->hpd_state.hpd1_bits = 0; - spin_unlock_irq(&dev_priv->hpd_state.lock); - - hpd0 &= nv_rd32(dev, 0xe050); - if 
(dev_priv->chipset >= 0x90) - hpd1 &= nv_rd32(dev, 0xe070); - - plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); - unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); - change_mask = plug_mask | unplug_mask; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct drm_encoder_helper_funcs *helper; - struct nouveau_connector *nv_connector = - nouveau_connector(connector); - struct nouveau_encoder *nv_encoder; - struct dcb_gpio_entry *gpio; - uint32_t reg; - bool plugged; - - if (!nv_connector->dcb) - continue; + u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; + u32 addr, data; + int chid; - gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag); - if (!gpio || !(change_mask & (1 << gpio->line))) + for (chid = 0; chid < 5; chid++) { + if (!(channels & (1 << chid))) continue; - reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]); - plugged = !!(reg & (4 << ((gpio->line & 7) << 2))); - NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", - drm_get_connector_name(connector)) ; - - if (!connector->encoder || !connector->encoder->crtc || - !connector->encoder->crtc->enabled) - continue; - nv_encoder = nouveau_encoder(connector->encoder); - helper = connector->encoder->helper_private; - - if (nv_encoder->dcb->type != OUTPUT_DP) - continue; + nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); + addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid)); + data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid)); + NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x " + "(0x%04x 0x%02x)\n", chid, + addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); - if (plugged) - helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); - else - helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); + nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); } - - drm_helper_hpd_irq_event(dev); } -void -nv50_display_irq_handler(struct drm_device *dev) +static void +nv50_display_isr(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t delayed = 0; - if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { - uint32_t hpd0_bits, hpd1_bits = 0; - - hpd0_bits = nv_rd32(dev, 0xe054); - nv_wr32(dev, 0xe054, hpd0_bits); - - if (dev_priv->chipset >= 0x90) { - hpd1_bits = nv_rd32(dev, 0xe074); - nv_wr32(dev, 0xe074, hpd1_bits); - } - - spin_lock(&dev_priv->hpd_state.lock); - dev_priv->hpd_state.hpd0_bits |= hpd0_bits; - dev_priv->hpd_state.hpd1_bits |= hpd1_bits; - spin_unlock(&dev_priv->hpd_state.lock); - - queue_work(dev_priv->wq, &dev_priv->hpd_work); - } - while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); @@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev) if (!intr0 && !(intr1 & ~delayed)) break; - if (intr0 & 0x00010000) { + if (intr0 & 0x001f0000) { nv50_display_error_handler(dev); - intr0 &= ~0x00010000; + intr0 &= ~0x001f0000; } if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { @@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev) } } } - diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index c551f0b85ee0..f0e30b78ef6b 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -35,9 +35,7 @@ #include "nouveau_crtc.h" #include "nv50_evo.h" -void nv50_display_irq_handler(struct drm_device *dev); void nv50_display_irq_handler_bh(struct work_struct *work); -void nv50_display_irq_hotplug_bh(struct 
work_struct *work); int nv50_display_early_init(struct drm_device *dev); void nv50_display_late_takedown(struct drm_device *dev); int nv50_display_create(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c new file mode 100644 index 000000000000..887b2a97e2a2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -0,0 +1,318 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_ramht.h" + +static void +nv50_evo_channel_del(struct nouveau_channel **pevo) +{ + struct drm_nouveau_private *dev_priv; + struct nouveau_channel *evo = *pevo; + + if (!evo) + return; + *pevo = NULL; + + dev_priv = evo->dev->dev_private; + dev_priv->evo_alloc &= ~(1 << evo->id); + + nouveau_gpuobj_channel_takedown(evo); + nouveau_bo_unmap(evo->pushbuf_bo); + nouveau_bo_ref(NULL, &evo->pushbuf_bo); + + if (evo->user) + iounmap(evo->user); + + kfree(evo); +} + +int +nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, + u32 tile_flags, u32 magic_flags, u32 offset, u32 limit) +{ + struct drm_nouveau_private *dev_priv = evo->dev->dev_private; + struct drm_device *dev = evo->dev; + struct nouveau_gpuobj *obj = NULL; + int ret; + + ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj); + if (ret) + return ret; + obj->engine = NVOBJ_ENGINE_DISPLAY; + + nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); + nv_wo32(obj, 4, limit); + nv_wo32(obj, 8, offset); + nv_wo32(obj, 12, 0x00000000); + nv_wo32(obj, 16, 0x00000000); + if (dev_priv->card_type < NV_C0) + nv_wo32(obj, 20, 0x00010000); + else + nv_wo32(obj, 20, 0x00020000); + dev_priv->engine.instmem.flush(dev); + + ret = nouveau_ramht_insert(evo, name, obj); + nouveau_gpuobj_ref(NULL, &obj); + if (ret) { + return ret; + } + + return 0; +} + +static int +nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo; + int ret; + + evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); + if (!evo) + return -ENOMEM; + *pevo = evo; + + for (evo->id = 0; evo->id < 5; evo->id++) { + if (dev_priv->evo_alloc & (1 << evo->id)) + continue; + + dev_priv->evo_alloc |= (1 << evo->id); + break; + } + + if (evo->id == 5) { + kfree(evo); + return -ENODEV; + } + + evo->dev = dev; + evo->user_get = 
4; + evo->user_put = 0; + + ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, + false, true, &evo->pushbuf_bo); + if (ret == 0) + ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pevo); + return ret; + } + + ret = nouveau_bo_map(evo->pushbuf_bo); + if (ret) { + NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pevo); + return ret; + } + + evo->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); + if (!evo->user) { + NV_ERROR(dev, "Error mapping EVO control regs.\n"); + nv50_evo_channel_del(pevo); + return -ENOMEM; + } + + /* bind primary evo channel's ramht to the channel */ + if (dev_priv->evo && evo != dev_priv->evo) + nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL); + + return 0; +} + +static int +nv50_evo_channel_init(struct nouveau_channel *evo) +{ + struct drm_device *dev = evo->dev; + int id = evo->id, ret, i; + u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; + u32 tmp; + + tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x009f0000) == 0x00020000) + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); + + tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x003f0000) == 0x00030000) + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); + + /* initialise fifo */ + nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | + NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | + NV50_PDISPLAY_EVO_DMA_CB_VALID); + nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); + nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); + nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + + nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { + NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); + return -EBUSY; + } + + /* enable error reporting on the channel */ + nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); + + evo->dma.max = (4096/4) - 2; + evo->dma.put = 0; + evo->dma.cur = evo->dma.put; + evo->dma.free = evo->dma.max - evo->dma.cur; + + ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); + if (ret) + return ret; + + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(evo, 0); + + return 0; +} + +static void +nv50_evo_channel_fini(struct nouveau_channel *evo) +{ + struct drm_device *dev = evo->dev; + int id = evo->id; + + nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); + nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); + nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); + nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { + NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); + } +} + +static int +nv50_evo_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramht = NULL; + struct nouveau_channel *evo; + int ret; + + /* create primary evo channel, the one we use for modesetting + * purposes + */ + ret = nv50_evo_channel_new(dev, &dev_priv->evo); + if (ret) + return ret; + evo = dev_priv->evo; + + /* setup object management on it, any other evo channel will + * use 
this also as there's no per-channel support on the + * hardware + */ + ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, + NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); + if (ret) { + NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = drm_mm_init(&evo->ramin_heap, 0, 32768); + if (ret) { + NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); + if (ret) { + NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = nouveau_ramht_new(dev, ramht, &evo->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + /* create some default objects for the scanout memtypes we support */ + if (dev_priv->chipset != 0x50) { + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + } + + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19, + 0, dev_priv->vram_size); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + return 0; +} + +int +nv50_evo_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (!dev_priv->evo) { + ret = nv50_evo_create(dev); + if (ret) + return ret; + } + + return nv50_evo_channel_init(dev_priv->evo); +} + +void +nv50_evo_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->evo) { + nv50_evo_channel_fini(dev_priv->evo); + nv50_evo_channel_del(&dev_priv->evo); + } +} diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h index aae13343bcec..aa4f0d3cea8e 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.h +++ b/drivers/gpu/drm/nouveau/nv50_evo.h @@ -24,6 +24,15 @@ * */ +#ifndef __NV50_EVO_H__ +#define __NV50_EVO_H__ + +int nv50_evo_init(struct drm_device *dev); +void nv50_evo_fini(struct drm_device *dev); +int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name, + u32 tile_flags, u32 magic_flags, + u32 offset, u32 limit); + #define NV50_EVO_UPDATE 0x00000080 #define NV50_EVO_UNK84 0x00000084 #define NV50_EVO_UNK84_NOTIFY 0x40000000 @@ -111,3 +120,4 @@ #define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 #define NV50_EVO_CRTC_SCALE_RES2 0x000008dc +#endif diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index cd1988b15d2c..50290dea0ac4 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -3,30 +3,75 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" +struct nv50_fb_priv { + struct page *r100c08_page; + dma_addr_t r100c08; +}; + +static int +nv50_fb_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fb_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!priv->r100c08_page) { + kfree(priv); + return -ENOMEM; + } + + priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) { + __free_page(priv->r100c08_page); + kfree(priv); + 
return -EFAULT; + } + + dev_priv->engine.fb.priv = priv; + return 0; +} + int nv50_fb_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fb_priv *priv; + int ret; + + if (!dev_priv->engine.fb.priv) { + ret = nv50_fb_create(dev); + if (ret) + return ret; + } + priv = dev_priv->engine.fb.priv; /* Not a clue what this is exactly. Without pointing it at a * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) * cause IOMMU "read from address 0" errors (rh#561267) */ - nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); + nv_wr32(dev, 0x100c08, priv->r100c08 >> 8); /* This is needed to get meaningful information from 100c90 * on traps. No idea what these values mean exactly. */ switch (dev_priv->chipset) { case 0x50: - nv_wr32(dev, 0x100c90, 0x0707ff); + nv_wr32(dev, 0x100c90, 0x000707ff); break; case 0xa3: case 0xa5: case 0xa8: - nv_wr32(dev, 0x100c90, 0x0d0fff); + nv_wr32(dev, 0x100c90, 0x000d0fff); + break; + case 0xaf: + nv_wr32(dev, 0x100c90, 0x089d1fff); break; default: - nv_wr32(dev, 0x100c90, 0x1d07ff); + nv_wr32(dev, 0x100c90, 0x001d07ff); break; } @@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev) void nv50_fb_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fb_priv *priv; + + priv = dev_priv->engine.fb.priv; + if (!priv) + return; + dev_priv->engine.fb.priv = NULL; + + pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->r100c08_page); + kfree(priv); } void nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) { struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; u32 trap[6], idx, chinst; int i, ch; @@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) return; chinst = (trap[2] << 16) | trap[1]; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { - struct nouveau_channel *chan = dev_priv->fifos[ch]; + struct nouveau_channel *chan = dev_priv->channels.ptr[ch]; if (!chan || !chan->ramin) continue; @@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) if (chinst == chan->ramin->vinst >> 12) break; } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " "channel %d (0x%08x)\n", diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6dcf048eddbc..6d38cb1488ae 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -3,27 +3,20 @@ #include "nouveau_dma.h" #include "nouveau_ramht.h" #include "nouveau_fbcon.h" +#include "nouveau_mm.h" -void +int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && - RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_fillrect(info, rect); - return; - } + ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 
7 : 11); + if (ret) + return ret; if (rect->rop != ROP_COPY) { BEGIN_RING(chan, NvSub2D, 0x02ac, 1); @@ -45,27 +38,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) OUT_RING(chan, 3); } FIRE_RING(chan); + return 0; } -void +int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_copyarea(info, region); - return; - } + ret = RING_SPACE(chan, 12); + if (ret) + return ret; BEGIN_RING(chan, NvSub2D, 0x0110, 1); OUT_RING(chan, 0); @@ -80,9 +67,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) OUT_RING(chan, 0); OUT_RING(chan, region->sy); FIRE_RING(chan); + return 0; } -void +int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *nfbdev = info->par; @@ -92,23 +80,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t width, dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (image->depth != 1) { - cfb_imageblit(info, image); - return; - } - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { - nouveau_fbcon_gpu_lockup(info); - } + if (image->depth != 1) + return -ENODEV; - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, 11); + if (ret) + return ret; width = ALIGN(image->width, 32); dwords = (width * image->height) >> 5; @@ -134,11 +113,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) while (dwords) { int push = dwords > 2047 ? 
2047 : dwords; - if (RING_SPACE(chan, push + 1)) { - nouveau_fbcon_gpu_lockup(info); - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, push + 1); + if (ret) + return ret; dwords -= push; @@ -148,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) } FIRE_RING(chan); + return 0; } int @@ -157,12 +135,9 @@ nv50_fbcon_accel_init(struct fb_info *info) struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; - struct nouveau_gpuobj *eng2d = NULL; - uint64_t fb; + struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; int ret, format; - fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; - switch (info->var.bits_per_pixel) { case 8: format = 0xf3; @@ -190,12 +165,7 @@ nv50_fbcon_accel_init(struct fb_info *info) return -EINVAL; } - ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d); - if (ret) - return ret; - - ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d); - nouveau_gpuobj_ref(NULL, &eng2d); + ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d); if (ret) return ret; @@ -253,8 +223,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, upper_32_bits(fb)); - OUT_RING(chan, lower_32_bits(fb)); + OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); + OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); BEGIN_RING(chan, NvSub2D, 0x0230, 2); OUT_RING(chan, format); OUT_RING(chan, 1); @@ -262,8 +232,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, upper_32_bits(fb)); - OUT_RING(chan, lower_32_bits(fb)); + OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); + OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 1da65bd60c10..8dd04c5dac67 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -28,6 +28,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_ramht.h" +#include "nouveau_vm.h" static void nv50_fifo_playlist_update(struct drm_device *dev) @@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev) /* We never schedule channel 0 or 127 */ for (i = 1, nr = 0; i < 127; i++) { - if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { + if (dev_priv->channels.ptr[i] && + dev_priv->channels.ptr[i]->ramfc) { nv_wo32(cur, (nr * 4), i); nr++; } @@ -60,7 +62,7 @@ static void nv50_fifo_channel_enable(struct drm_device *dev, int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->fifos[channel]; + struct nouveau_channel *chan = dev_priv->channels.ptr[channel]; uint32_t inst; NV_DEBUG(dev, "ch%d\n", channel); @@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev) { NV_DEBUG(dev, "\n"); + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); } @@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev) NV_DEBUG(dev, "\n"); for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { - if (dev_priv->fifos[i]) + if (dev_priv->channels.ptr[i]) nv50_fifo_channel_enable(dev, i); else nv50_fifo_channel_disable(dev, i); @@ -206,6 +209,9 @@ nv50_fifo_takedown(struct 
drm_device *dev) if (!pfifo->playlist[0]) return; + nv_wr32(dev, 0x2140, 0x00000000); + nouveau_irq_unregister(dev, 8); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); } @@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan) } ramfc = chan->ramfc; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV50_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); @@ -291,10 +302,23 @@ void nv50_fifo_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_gpuobj *ramfc = NULL; + unsigned long flags; NV_DEBUG(dev, "ch%d\n", chan->id); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pfifo->reassign(dev, false); + + /* Unload the context if it's the currently active one */ + if (pfifo->channel_id(dev) == chan->id) { + pfifo->disable(dev); + pfifo->unload_context(dev); + pfifo->enable(dev); + } + /* This will ensure the channel is seen as disabled. */ nouveau_gpuobj_ref(chan->ramfc, &ramfc); nouveau_gpuobj_ref(NULL, &chan->ramfc); @@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) nv50_fifo_channel_disable(dev, 127); nv50_fifo_playlist_update(dev); + pfifo->reassign(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + /* Free the channel resources */ + if (chan->user) { + iounmap(chan->user); + chan->user = NULL; + } nouveau_gpuobj_ref(NULL, &ramfc); nouveau_gpuobj_ref(NULL, &chan->cache); } @@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev) if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) return 0; - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (!chan) { NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); return -EINVAL; @@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev) void nv50_fifo_tlb_flush(struct drm_device *dev) { - nv50_vm_flush(dev, 5); + nv50_vm_flush_engine(dev, 5); } diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c index b2fab2bf3d61..6b149c0cc06d 100644 --- a/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -26,6 +26,28 @@ #include "nouveau_drv.h" #include "nouveau_hw.h" +#include "nv50_display.h" + +static void nv50_gpio_isr(struct drm_device *dev); +static void nv50_gpio_isr_bh(struct work_struct *work); + +struct nv50_gpio_priv { + struct list_head handlers; + spinlock_t lock; +}; + +struct nv50_gpio_handler { + struct drm_device *dev; + struct list_head head; + struct work_struct work; + bool inhibit; + + struct dcb_gpio_entry *gpio; + + void (*handler)(void *data, int state); + void *data; +}; + static int nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) { @@ -75,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) return 0; } +int +nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, + void (*handler)(void *, int), void *data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + struct nv50_gpio_handler *gpioh; + struct dcb_gpio_entry *gpio; + unsigned long flags; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if 
(!gpio) + return -ENOENT; + + gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL); + if (!gpioh) + return -ENOMEM; + + INIT_WORK(&gpioh->work, nv50_gpio_isr_bh); + gpioh->dev = dev; + gpioh->gpio = gpio; + gpioh->handler = handler; + gpioh->data = data; + + spin_lock_irqsave(&priv->lock, flags); + list_add(&gpioh->head, &priv->handlers); + spin_unlock_irqrestore(&priv->lock, flags); + return 0; +} + void -nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) +nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag, + void (*handler)(void *, int), void *data) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + struct nv50_gpio_handler *gpioh, *tmp; struct dcb_gpio_entry *gpio; - u32 reg, mask; + unsigned long flags; gpio = nouveau_bios_gpio_entry(dev, tag); - if (!gpio) { - NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag); + if (!gpio) return; + + spin_lock_irqsave(&priv->lock, flags); + list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) { + if (gpioh->gpio != gpio || + gpioh->handler != handler || + gpioh->data != data) + continue; + list_del(&gpioh->head); + kfree(gpioh); } + spin_unlock_irqrestore(&priv->lock, flags); +} + +bool +nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) +{ + struct dcb_gpio_entry *gpio; + u32 reg, mask; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) + return false; reg = gpio->line < 16 ? 0xe050 : 0xe070; mask = 0x00010001 << (gpio->line & 0xf); nv_wr32(dev, reg + 4, mask); - nv_mask(dev, reg + 0, mask, on ? mask : 0); + reg = nv_mask(dev, reg + 0, mask, on ? mask : 0); + return (reg & mask) == mask; +} + +static int +nv50_gpio_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + INIT_LIST_HEAD(&priv->handlers); + spin_lock_init(&priv->lock); + pgpio->priv = priv; + return 0; +} + +static void +nv50_gpio_destroy(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + + kfree(pgpio->priv); + pgpio->priv = NULL; } int nv50_gpio_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv; + int ret; + + if (!pgpio->priv) { + ret = nv50_gpio_create(dev); + if (ret) + return ret; + } + priv = pgpio->priv; /* disable, and ack any pending gpio interrupts */ nv_wr32(dev, 0xe050, 0x00000000); @@ -107,5 +223,77 @@ nv50_gpio_init(struct drm_device *dev) nv_wr32(dev, 0xe074, 0xffffffff); } + nouveau_irq_register(dev, 21, nv50_gpio_isr); return 0; } + +void +nv50_gpio_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nv_wr32(dev, 0xe050, 0x00000000); + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe070, 0x00000000); + nouveau_irq_unregister(dev, 21); + + nv50_gpio_destroy(dev); +} + +static void +nv50_gpio_isr_bh(struct work_struct *work) +{ + struct nv50_gpio_handler *gpioh = + container_of(work, struct nv50_gpio_handler, work); + struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + unsigned long 
flags; + int state; + + state = pgpio->get(gpioh->dev, gpioh->gpio->tag); + if (state < 0) + return; + + gpioh->handler(gpioh->data, state); + + spin_lock_irqsave(&priv->lock, flags); + gpioh->inhibit = false; + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void +nv50_gpio_isr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + struct nv50_gpio_handler *gpioh; + u32 intr0, intr1 = 0; + u32 hi, lo, ch; + + intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); + if (dev_priv->chipset >= 0x90) + intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); + + hi = (intr0 & 0x0000ffff) | (intr1 << 16); + lo = (intr0 >> 16) | (intr1 & 0xffff0000); + ch = hi | lo; + + nv_wr32(dev, 0xe054, intr0); + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe074, intr1); + + spin_lock(&priv->lock); + list_for_each_entry(gpioh, &priv->handlers, head) { + if (!(ch & (1 << gpioh->gpio->line))) + continue; + + if (gpioh->inhibit) + continue; + gpioh->inhibit = true; + + queue_work(dev_priv->wq, &gpioh->work); + } + spin_unlock(&priv->lock); +} diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8b669d0af610..c510e74acf4d 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -29,6 +29,12 @@ #include "nouveau_drv.h" #include "nouveau_ramht.h" #include "nouveau_grctx.h" +#include "nouveau_dma.h" +#include "nouveau_vm.h" +#include "nv50_evo.h" + +static int nv50_graph_register(struct drm_device *); +static void nv50_graph_isr(struct drm_device *); static void nv50_graph_init_reset(struct drm_device *dev) @@ -46,6 +52,7 @@ nv50_graph_init_intr(struct drm_device *dev) { NV_DEBUG(dev, "\n"); + nouveau_irq_register(dev, 12, nv50_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); nv_wr32(dev, 0x400138, 0xffffffff); nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); @@ -145,12 +152,15 @@ nv50_graph_init(struct drm_device *dev) nv50_graph_init_reset(dev); nv50_graph_init_regs__nv(dev); nv50_graph_init_regs(dev); - nv50_graph_init_intr(dev); ret = nv50_graph_init_ctxctl(dev); if (ret) return ret; + ret = nv50_graph_register(dev); + if (ret) + return ret; + nv50_graph_init_intr(dev); return 0; } @@ -158,6 +168,8 @@ void nv50_graph_takedown(struct drm_device *dev) { NV_DEBUG(dev, "\n"); + nv_wr32(dev, 0x40013c, 0x00000000); + nouveau_irq_unregister(dev, 12); } void @@ -190,7 +202,7 @@ nv50_graph_channel(struct drm_device *dev) inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; + struct nouveau_channel *chan = dev_priv->channels.ptr[i]; if (chan && chan->ramin && chan->ramin->vinst == inst) return chan; @@ -211,7 +223,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); if (ret) @@ -234,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); dev_priv->engine.instmem.flush(dev); + atomic_inc(&chan->vm->pgraph_refs); return 0; } @@ -242,18 +255,31 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = 
dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; + unsigned long flags; NV_DEBUG(dev, "ch%d\n", chan->id); if (!chan->ramin) return; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + for (i = hdr; i < hdr + 24; i += 4) nv_wo32(chan->ramin, i, 0); dev_priv->engine.instmem.flush(dev); + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + + atomic_dec(&chan->vm->pgraph_refs); } static int @@ -306,7 +332,7 @@ nv50_graph_unload_context(struct drm_device *dev) return 0; } -void +static void nv50_graph_context_switch(struct drm_device *dev) { uint32_t inst; @@ -322,8 +348,8 @@ nv50_graph_context_switch(struct drm_device *dev) } static int -nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { struct nouveau_gpuobj *gpuobj; @@ -340,8 +366,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, } static int -nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) return -ERANGE; @@ -351,16 +377,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, } static int -nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { chan->nvsw.vblsem_rval = data; return 0; } static int -nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -368,45 +394,85 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) return -EINVAL; - if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) & - NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) { - nv_wr32(dev, NV50_PDISPLAY_INTR_1, - NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data)); - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, - NV50_PDISPLAY_INTR_EN) | - NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data)); - } + drm_vblank_get(dev, data); + chan->nvsw.vblsem_head = data; list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); + return 0; } -static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { - { 0x018c, nv50_graph_nvsw_dma_vblsem }, - { 0x0400, nv50_graph_nvsw_vblsem_offset }, - { 0x0404, nv50_graph_nvsw_vblsem_release_val }, - { 0x0408, nv50_graph_nvsw_vblsem_release }, - {} -}; +static int +nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) +{ + struct nouveau_page_flip_state s; -struct nouveau_pgraph_object_class nv50_graph_grclass[] = { - { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */ - { 0x0030, false, NULL }, /* null */ - { 0x5039, false, NULL }, /* m2mf */ - { 0x502d, false, NULL }, /* 2d */ - { 0x50c0, false, NULL }, /* compute */ - { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ 
- { 0x5097, false, NULL }, /* tesla (nv50) */ - { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ - { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ - { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ - {} -}; + if (!nouveau_finish_page_flip(chan, &s)) { + /* XXX - Do something here */ + } + + return 0; +} + +static int +nv50_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem); + NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset); + NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val); + NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip); + + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ + + /* tesla */ + if (dev_priv->chipset == 0x50) + NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */ + else + if (dev_priv->chipset < 0xa0) + NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */ + else { + switch (dev_priv->chipset) { + case 0xa0: + case 0xaa: + case 0xac: + NVOBJ_CLASS(dev, 0x8397, GR); + break; + case 0xa3: + case 0xa5: + case 0xa8: + NVOBJ_CLASS(dev, 0x8597, GR); + break; + case 0xaf: + NVOBJ_CLASS(dev, 0x8697, GR); + break; + } + } + + /* compute */ + NVOBJ_CLASS(dev, 0x50c0, GR); + if (dev_priv->chipset > 0xa0 && + dev_priv->chipset != 0xaa && + dev_priv->chipset != 0xac) + NVOBJ_CLASS(dev, 0x85c0, GR); + + dev_priv->engine.graph.registered = true; + return 0; +} void nv50_graph_tlb_flush(struct drm_device *dev) { - nv50_vm_flush(dev, 0); + nv50_vm_flush_engine(dev, 0); } void @@ -449,8 +515,500 @@ nv86_graph_tlb_flush(struct drm_device *dev) nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); } - nv50_vm_flush(dev, 0); + nv50_vm_flush_engine(dev, 0); nv_mask(dev, 0x400500, 0x00000001, 0x00000001); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); } + +static struct nouveau_enum nv50_mp_exec_error_names[] = +{ + { 3, "STACK_UNDERFLOW" }, + { 4, "QUADON_ACTIVE" }, + { 8, "TIMEOUT" }, + { 0x10, "INVALID_OPCODE" }, + { 0x40, "BREAKPOINT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_m2mf[] = { + { 0x00000001, "NOTIFY" }, + { 0x00000002, "IN" }, + { 0x00000004, "OUT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_vfetch[] = { + { 0x00000001, "FAULT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_strmout[] = { + { 0x00000001, "FAULT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_ccache[] = { + { 0x00000001, "FAULT" }, + {} +}; + +/* There must be a *lot* of these. Will take some time to gather them up. 
*/ +static struct nouveau_enum nv50_data_error_names[] = { + { 4, "INVALID_VALUE" }, + { 5, "INVALID_ENUM" }, + { 8, "INVALID_OBJECT" }, + { 0xc, "INVALID_BITFIELD" }, + { 0x28, "MP_NO_REG_SPACE" }, + { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_intr[] = { + { 0x00000001, "NOTIFY" }, + { 0x00000002, "COMPUTE_QUERY" }, + { 0x00000010, "ILLEGAL_MTHD" }, + { 0x00000020, "ILLEGAL_CLASS" }, + { 0x00000040, "DOUBLE_NOTIFY" }, + { 0x00001000, "CONTEXT_SWITCH" }, + { 0x00010000, "BUFFER_NOTIFY" }, + { 0x00100000, "DATA_ERROR" }, + { 0x00200000, "TRAP" }, + { 0x01000000, "SINGLE_STEP" }, + {} +}; + +static void +nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t units = nv_rd32(dev, 0x1540); + uint32_t addr, mp10, status, pc, oplow, ophigh; + int i; + int mps = 0; + for (i = 0; i < 4; i++) { + if (!(units & 1 << (i+24))) + continue; + if (dev_priv->chipset < 0xa0) + addr = 0x408200 + (tpid << 12) + (i << 7); + else + addr = 0x408100 + (tpid << 11) + (i << 7); + mp10 = nv_rd32(dev, addr + 0x10); + status = nv_rd32(dev, addr + 0x14); + if (!status) + continue; + if (display) { + nv_rd32(dev, addr + 0x20); + pc = nv_rd32(dev, addr + 0x24); + oplow = nv_rd32(dev, addr + 0x70); + ophigh= nv_rd32(dev, addr + 0x74); + NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " + "TP %d MP %d: ", tpid, i); + nouveau_enum_print(nv50_mp_exec_error_names, status); + printk(" at %06x warp %d, opcode %08x %08x\n", + pc&0xffffff, pc >> 24, + oplow, ophigh); + } + nv_wr32(dev, addr + 0x10, mp10); + nv_wr32(dev, addr + 0x14, 0); + mps++; + } + if (!mps && display) + NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " + "No MPs claiming errors?\n", tpid); +} + +static void +nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, + uint32_t ustatus_new, int display, const char *name) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int tps = 0; + uint32_t units = nv_rd32(dev, 0x1540); + int i, r; + uint32_t ustatus_addr, ustatus; + for (i = 0; i < 16; i++) { + if (!(units & (1 << i))) + continue; + if (dev_priv->chipset < 0xa0) + ustatus_addr = ustatus_old + (i << 12); + else + ustatus_addr = ustatus_new + (i << 11); + ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; + if (!ustatus) + continue; + tps++; + switch (type) { + case 6: /* texture error... 
unknown for now */ + nv50_fb_vm_trap(dev, display, name); + if (display) { + NV_ERROR(dev, "magic set %d:\n", i); + for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + } + break; + case 7: /* MP error */ + if (ustatus & 0x00010000) { + nv50_pgraph_mp_trap(dev, i, display); + ustatus &= ~0x00010000; + } + break; + case 8: /* TPDMA error */ + { + uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); + uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); + uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); + uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); + uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); + uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); + uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); + nv50_fb_vm_trap(dev, display, name); + /* 2d engine destination */ + if (ustatus & 0x00000010) { + if (display) { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000010; + } + /* Render target */ + if (ustatus & 0x00000040) { + if (display) { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000040; + } + /* CUDA memory: l[], g[] or stack. */ + if (ustatus & 0x00000080) { + if (display) { + if (e18 & 0x80000000) { + /* g[] read fault? */ + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 24) & 0x1f)); + e18 &= ~0x1f000000; + } else if (e18 & 0xc) { + /* g[] write fault? */ + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 7) & 0x1f)); + e18 &= ~0x00000f80; + } else { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", + i, e14, e10); + } + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000080; + } + } + break; + } + if (ustatus) { + if (display) + NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); + } + nv_wr32(dev, ustatus_addr, 0xc0000000); + } + + if (!tps && display) + NV_INFO(dev, "%s - No TPs claiming errors?\n", name); +} + +static int +nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid) +{ + u32 status = nv_rd32(dev, 0x400108); + u32 ustatus; + + if (!status && display) { + NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n"); + return 1; + } + + /* DISPATCH: Relays commands to other units and handles NOTIFY, + * COND, QUERY. If you get a trap from it, the command is still stuck + * in DISPATCH and you need to do something about it. */ + if (status & 0x001) { + ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; + if (!ustatus && display) { + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); + } + + nv_wr32(dev, 0x400500, 0x00000000); + + /* Known to be triggered by screwed up NOTIFY and COND... 
*/ + if (ustatus & 0x00000001) { + u32 addr = nv_rd32(dev, 0x400808); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 datal = nv_rd32(dev, 0x40080c); + u32 datah = nv_rd32(dev, 0x400810); + u32 class = nv_rd32(dev, 0x400814); + u32 r848 = nv_rd32(dev, 0x400848); + + NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n"); + if (display && (addr & 0x80000000)) { + NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " + "subc %d class 0x%04x mthd 0x%04x " + "data 0x%08x%08x " + "400808 0x%08x 400848 0x%08x\n", + chid, inst, subc, class, mthd, datah, + datal, addr, r848); + } else + if (display) { + NV_INFO(dev, "PGRAPH - no stuck command?\n"); + } + + nv_wr32(dev, 0x400808, 0); + nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); + nv_wr32(dev, 0x400848, 0); + ustatus &= ~0x00000001; + } + + if (ustatus & 0x00000002) { + u32 addr = nv_rd32(dev, 0x40084c); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, 0x40085c); + u32 class = nv_rd32(dev, 0x400814); + + NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n"); + if (display && (addr & 0x80000000)) { + NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " + "subc %d class 0x%04x mthd 0x%04x " + "data 0x%08x 40084c 0x%08x\n", + chid, inst, subc, class, mthd, + data, addr); + } else + if (display) { + NV_INFO(dev, "PGRAPH - no stuck command?\n"); + } + + nv_wr32(dev, 0x40084c, 0); + ustatus &= ~0x00000002; + } + + if (ustatus && display) { + NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown " + "0x%08x)\n", ustatus); + } + + nv_wr32(dev, 0x400804, 0xc0000000); + nv_wr32(dev, 0x400108, 0x001); + status &= ~0x001; + if (!status) + return 0; + } + + /* M2MF: Memory to memory copy engine. */ + if (status & 0x002) { + u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; + if (display) { + NV_INFO(dev, "PGRAPH - TRAP_M2MF"); + nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n", + nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808), + nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810)); + + } + + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(dev, 0x400040, 2); + nv_wr32(dev, 0x400040, 0); + nv_wr32(dev, 0x406800, 0xc0000000); + nv_wr32(dev, 0x400108, 0x002); + status &= ~0x002; + } + + /* VFETCH: Fetches data from vertex buffers. */ + if (status & 0x004) { + u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; + if (display) { + NV_INFO(dev, "PGRAPH - TRAP_VFETCH"); + nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n", + nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), + nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10)); + } + + nv_wr32(dev, 0x400c04, 0xc0000000); + nv_wr32(dev, 0x400108, 0x004); + status &= ~0x004; + } + + /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ + if (status & 0x008) { + ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; + if (display) { + NV_INFO(dev, "PGRAPH - TRAP_STRMOUT"); + nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n", + nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), + nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810)); + + } + + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(dev, 0x400040, 0x80); + nv_wr32(dev, 0x400040, 0); + nv_wr32(dev, 0x401800, 0xc0000000); + nv_wr32(dev, 0x400108, 0x008); + status &= ~0x008; + } + + /* CCACHE: Handles code and c[] caches and fills them. 
*/
+ if (status & 0x010) {
+ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+ nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+ " %08x %08x %08x\n",
+ nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
+ nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
+ nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
+ nv_rd32(dev, 0x40581c));
+
+ }
+
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x010);
+ status &= ~0x010;
+ }
+
+ /* Unknown, not seen yet... 0x402000 is the only trap status reg
+ * remaining, so try to handle it anyway. Perhaps related to that
+ * unknown DMA slot on tesla? */
+ if (status & 0x20) {
+ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+ /* no status modification on purpose */
+ }
+
+ /* TEXTURE: CUDA texturing units */
+ if (status & 0x040) {
+ nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+ "PGRAPH - TRAP_TEXTURE");
+ nv_wr32(dev, 0x400108, 0x040);
+ status &= ~0x040;
+ }
+
+ /* MP: CUDA execution engines. */
+ if (status & 0x080) {
+ nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+ "PGRAPH - TRAP_MP");
+ nv_wr32(dev, 0x400108, 0x080);
+ status &= ~0x080;
+ }
+
+ /* TPDMA: Handles TP-initiated uncached memory accesses:
+ * l[], g[], stack, 2d surfaces, render targets. */
+ if (status & 0x100) {
+ nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+ "PGRAPH - TRAP_TPDMA");
+ nv_wr32(dev, 0x400108, 0x100);
+ status &= ~0x100;
+ }
+
+ if (status) {
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+ nv_wr32(dev, 0x400108, status);
+ }
+
+ return 1;
+}
+
+static int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, 0x400100))) {
+ u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+ u32 chid = nv50_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 show = stat;
+
+ if (stat & 0x00000010) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+ mthd, data))
+ show &= ~0x00000010;
+ }
+
+ if (stat & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, 0x400100, 0x00001000);
+ nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
+ nv50_graph_context_switch(dev);
+ stat &= ~0x00001000;
+ show &= ~0x00001000;
+ }
+
+ show = (show && nouveau_ratelimit()) ?
show : 0; + + if (show & 0x00100000) { + u32 ecode = nv_rd32(dev, 0x400110); + NV_INFO(dev, "PGRAPH - DATA_ERROR "); + nouveau_enum_print(nv50_data_error_names, ecode); + printk("\n"); + } + + if (stat & 0x00200000) { + if (!nv50_pgraph_trap_handler(dev, show, inst, chid)) + show &= ~0x00200000; + } + + nv_wr32(dev, 0x400100, stat); + nv_wr32(dev, 0x400500, 0x00010001); + + if (show) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv50_graph_intr, show); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d " + "class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, subc, class, mthd, data); + } + } + + if (nv_rd32(dev, 0x400824) & (1 << 31)) + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); +} diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index b773229b7647..adac4da98f7e 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -27,14 +27,20 @@ #include "drmP.h" #include "drm.h" + #include "nouveau_drv.h" +#include "nouveau_vm.h" + +#define BAR1_VM_BASE 0x0020000000ULL +#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1) +#define BAR3_VM_BASE 0x0000000000ULL +#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3) struct nv50_instmem_priv { uint32_t save1700[5]; /* 0x1700->0x1710 */ - struct nouveau_gpuobj *pramin_pt; - struct nouveau_gpuobj *pramin_bar; - struct nouveau_gpuobj *fb_bar; + struct nouveau_gpuobj *bar1_dmaobj; + struct nouveau_gpuobj *bar3_dmaobj; }; static void @@ -48,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan) return; nouveau_gpuobj_ref(NULL, &chan->ramfc); + nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); if (chan->ramin_heap.free_stack.next) drm_mm_takedown(&chan->ramin_heap); @@ -56,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan) } static int -nv50_channel_new(struct drm_device *dev, u32 size, +nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, struct nouveau_channel **pchan) { struct drm_nouveau_private *dev_priv = dev->dev_private; u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; struct nouveau_channel *chan; - int ret; + int ret, i; chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) @@ -92,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size, return ret; } + for (i = 0; i < 0x4000; i += 8) { + nv_wo32(chan->vm_pd, i + 0, 0x00000000); + nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); + } + + ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? 
~0 : chan->ramin->pinst + fc, chan->ramin->vinst + fc, 0x100, @@ -111,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv; struct nouveau_channel *chan; + struct nouveau_vm *vm; int ret, i; u32 tmp; @@ -127,112 +146,89 @@ nv50_instmem_init(struct drm_device *dev) ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); if (ret) { NV_ERROR(dev, "Failed to init RAMIN heap\n"); - return -ENOMEM; + goto error; } - /* we need a channel to plug into the hw to control the BARs */ - ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); + /* BAR3 */ + ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE, + 29, 12, 16, &dev_priv->bar3_vm); if (ret) - return ret; - chan = dev_priv->fifos[127] = dev_priv->fifos[0]; + goto error; - /* allocate page table for PRAMIN BAR */ - ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, - 0x1000, NVOBJ_FLAG_ZERO_ALLOC, - &priv->pramin_pt); + ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8, + 0x1000, NVOBJ_FLAG_DONT_MAP | + NVOBJ_FLAG_ZERO_ALLOC, + &dev_priv->bar3_vm->pgt[0].obj); if (ret) - return ret; + goto error; + dev_priv->bar3_vm->pgt[0].page_shift = 12; + dev_priv->bar3_vm->pgt[0].refcount = 1; - nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63); - nv_wo32(chan->vm_pd, 0x0004, 0); + nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj); - /* DMA object for PRAMIN BAR */ - ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); + ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan); if (ret) - return ret; - nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000); - nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); - nv_wo32(priv->pramin_bar, 0x08, 0x00000000); - nv_wo32(priv->pramin_bar, 0x0c, 0x00000000); - nv_wo32(priv->pramin_bar, 0x10, 0x00000000); - nv_wo32(priv->pramin_bar, 0x14, 0x00000000); - - /* map channel into PRAMIN, gpuobj didn't do it for us */ - ret = nv50_instmem_bind(dev, chan->ramin); + goto error; + dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan; + + ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE, + NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, + NV_MEM_TYPE_VM, NV_MEM_COMP_VM, + &priv->bar3_dmaobj); if (ret) - return ret; + goto error; - /* poke regs... 
*/ nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); - nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4)); - - tmp = nv_ri32(dev, 0); - nv_wi32(dev, 0, ~tmp); - if (nv_ri32(dev, 0) != ~tmp) { - NV_ERROR(dev, "PRAMIN readback failed\n"); - return -EIO; - } - nv_wi32(dev, 0, tmp); + nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4)); + dev_priv->engine.instmem.flush(dev); dev_priv->ramin_available = true; - /* Determine VM layout */ - dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); - dev_priv->vm_gart_size = NV50_VM_BLOCK; - - dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; - dev_priv->vm_vram_size = dev_priv->vram_size; - if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) - dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; - dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); - dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK; - - dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size; - - NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n", - dev_priv->vm_gart_base, - dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1); - NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n", - dev_priv->vm_vram_base, - dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1); - - /* VRAM page table(s), mapped into VM at +1GiB */ - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8, - 0, NVOBJ_FLAG_ZERO_ALLOC, - &chan->vm_vram_pt[i]); - if (ret) { - NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret); - dev_priv->vm_vram_pt_nr = i; - return ret; - } - dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; - - nv_wo32(chan->vm_pd, 0x10 + (i*8), - chan->vm_vram_pt[i]->vinst | 0x61); - nv_wo32(chan->vm_pd, 0x14 + (i*8), 0); + tmp = nv_ro32(chan->ramin, 0); + nv_wo32(chan->ramin, 0, ~tmp); + if (nv_ro32(chan->ramin, 0) != ~tmp) { + NV_ERROR(dev, "PRAMIN readback failed\n"); + ret = -EIO; + goto error; } + nv_wo32(chan->ramin, 0, tmp); - /* DMA object for FB BAR */ - ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); + /* BAR1 */ + ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, + 29, 12, 16, &vm); if (ret) - return ret; - nv_wo32(priv->fb_bar, 0x00, 0x7fc00000); - nv_wo32(priv->fb_bar, 0x04, 0x40000000 + - pci_resource_len(dev->pdev, 1) - 1); - nv_wo32(priv->fb_bar, 0x08, 0x40000000); - nv_wo32(priv->fb_bar, 0x0c, 0x00000000); - nv_wo32(priv->fb_bar, 0x10, 0x00000000); - nv_wo32(priv->fb_bar, 0x14, 0x00000000); + goto error; - dev_priv->engine.instmem.flush(dev); + ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd); + if (ret) + goto error; + nouveau_vm_ref(NULL, &vm, NULL); + + ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE, + NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, + NV_MEM_TYPE_VM, NV_MEM_COMP_VM, + &priv->bar1_dmaobj); + if (ret) + goto error; - nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); + nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4)); for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); + /* Create shared channel VM, space is reserved at the beginning + * to catch "NULL pointer" references + */ + ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, + 29, 12, 16, &dev_priv->chan_vm); + if (ret) + return ret; + return 0; + +error: + nv50_instmem_takedown(dev); + return ret; } void @@ -240,7 +236,7 @@ nv50_instmem_takedown(struct drm_device *dev) { struct 
drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_channel *chan = dev_priv->fifos[0]; + struct nouveau_channel *chan = dev_priv->channels.ptr[0]; int i; NV_DEBUG(dev, "\n"); @@ -250,23 +246,23 @@ nv50_instmem_takedown(struct drm_device *dev) dev_priv->ramin_available = false; - /* Restore state from before init */ + nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); + for (i = 0x1700; i <= 0x1710; i += 4) nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); - nouveau_gpuobj_ref(NULL, &priv->fb_bar); - nouveau_gpuobj_ref(NULL, &priv->pramin_bar); - nouveau_gpuobj_ref(NULL, &priv->pramin_pt); + nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj); + nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj); - /* Destroy dummy channel */ - if (chan) { - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); - dev_priv->vm_vram_pt_nr = 0; + nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd); + dev_priv->channels.ptr[127] = 0; + nv50_channel_del(&dev_priv->channels.ptr[0]); - nv50_channel_del(&dev_priv->fifos[0]); - dev_priv->fifos[127] = NULL; - } + nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj); + nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); + + if (dev_priv->ramin_heap.free_stack.next) + drm_mm_takedown(&dev_priv->ramin_heap); dev_priv->engine.instmem.priv = NULL; kfree(priv); @@ -276,16 +272,8 @@ int nv50_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->fifos[0]; - struct nouveau_gpuobj *ramin = chan->ramin; - int i; - ramin->im_backing_suspend = vmalloc(ramin->size); - if (!ramin->im_backing_suspend) - return -ENOMEM; - - for (i = 0; i < ramin->size; i += 4) - ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); + dev_priv->ramin_available = false; return 0; } @@ -294,146 +282,121 @@ nv50_instmem_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_channel *chan = dev_priv->fifos[0]; - struct nouveau_gpuobj *ramin = chan->ramin; + struct nouveau_channel *chan = dev_priv->channels.ptr[0]; int i; - dev_priv->ramin_available = false; - dev_priv->ramin_base = ~0; - for (i = 0; i < ramin->size; i += 4) - nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]); - dev_priv->ramin_available = true; - vfree(ramin->im_backing_suspend); - ramin->im_backing_suspend = NULL; - /* Poke the relevant regs, and pray it works :) */ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); nv_wr32(dev, NV50_PUNK_UNK1710, 0); nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | NV50_PUNK_BAR_CFG_BASE_VALID); - nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | + nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) | NV50_PUNK_BAR1_CTXDMA_VALID); - nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | + nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) | NV50_PUNK_BAR3_CTXDMA_VALID); for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); + + dev_priv->ramin_available = true; } +struct nv50_gpuobj_node { + struct nouveau_vram *vram; + struct nouveau_vma chan_vma; + u32 align; +}; + + int -nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - uint32_t *sz) +nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { + struct drm_device *dev = gpuobj->dev; + struct drm_nouveau_private 
*dev_priv = dev->dev_private; + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; + struct nv50_gpuobj_node *node = NULL; int ret; - if (gpuobj->im_backing) - return -EINVAL; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->align = align; - *sz = ALIGN(*sz, 4096); - if (*sz == 0) - return -EINVAL; + size = (size + 4095) & ~4095; + align = max(align, (u32)4096); - ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, - true, false, &gpuobj->im_backing); + ret = vram->get(dev, size, align, 0, 0, &node->vram); if (ret) { - NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); + kfree(node); return ret; } - ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); - if (ret) { - NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); - nouveau_bo_ref(NULL, &gpuobj->im_backing); - return ret; + gpuobj->vinst = node->vram->offset; + + if (gpuobj->flags & NVOBJ_FLAG_VM) { + ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, + NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, + &node->chan_vma); + if (ret) { + vram->put(dev, &node->vram); + kfree(node); + return ret; + } + + nouveau_vm_map(&node->chan_vma, node->vram); + gpuobj->vinst = node->chan_vma.offset; } - gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; + gpuobj->size = size; + gpuobj->node = node; return 0; } void -nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv50_instmem_put(struct nouveau_gpuobj *gpuobj) { + struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; + struct nv50_gpuobj_node *node; + + node = gpuobj->node; + gpuobj->node = NULL; - if (gpuobj && gpuobj->im_backing) { - if (gpuobj->im_bound) - dev_priv->engine.instmem.unbind(dev, gpuobj); - nouveau_bo_unpin(gpuobj->im_backing); - nouveau_bo_ref(NULL, &gpuobj->im_backing); - gpuobj->im_backing = NULL; + if (node->chan_vma.node) { + nouveau_vm_unmap(&node->chan_vma); + nouveau_vm_put(&node->chan_vma); } + vram->put(dev, &node->vram); + kfree(node); } int -nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv50_instmem_map(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; - uint32_t pte, pte_end; - uint64_t vram; - - if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) - return -EINVAL; - - NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", - gpuobj->im_pramin->start, gpuobj->im_pramin->size); - - pte = (gpuobj->im_pramin->start >> 12) << 1; - pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; - vram = gpuobj->vinst; - - NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", - gpuobj->im_pramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); - - vram |= 1; - if (dev_priv->vram_sys_base) { - vram += dev_priv->vram_sys_base; - vram |= 0x30; - } - - while (pte < pte_end) { - nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); - nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); - vram += 0x1000; - pte += 2; - } - dev_priv->engine.instmem.flush(dev); + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct nv50_gpuobj_node *node = gpuobj->node; + int ret; - nv50_vm_flush(dev, 6); + ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12, + NV_MEM_ACCESS_RW, &node->vram->bar_vma); + if (ret) + return ret; - gpuobj->im_bound = 
1; + nouveau_vm_map(&node->vram->bar_vma, node->vram); + gpuobj->pinst = node->vram->bar_vma.offset; return 0; } -int -nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +void +nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - uint32_t pte, pte_end; - - if (gpuobj->im_bound == 0) - return -EINVAL; - - /* can happen during late takedown */ - if (unlikely(!dev_priv->ramin_available)) - return 0; + struct nv50_gpuobj_node *node = gpuobj->node; - pte = (gpuobj->im_pramin->start >> 12) << 1; - pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; - - while (pte < pte_end) { - nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); - nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); - pte += 2; + if (node->vram->bar_vma.node) { + nouveau_vm_unmap(&node->vram->bar_vma); + nouveau_vm_put(&node->vram->bar_vma); } - dev_priv->engine.instmem.flush(dev); - - gpuobj->im_bound = 0; - return 0; } void @@ -452,11 +415,3 @@ nv84_instmem_flush(struct drm_device *dev) NV_ERROR(dev, "PRAMIN flush timeout\n"); } -void -nv50_vm_flush(struct drm_device *dev, int engine) -{ - nv_wr32(dev, 0x100c80, (engine << 16) | 1); - if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) - NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); -} - diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c new file mode 100644 index 000000000000..eebab95f59b2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -0,0 +1,178 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_vm.h" + +void +nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, + struct nouveau_gpuobj *pgt) +{ + struct drm_nouveau_private *dev_priv = pgd->dev->dev_private; + u32 coverage = (pgt->size >> 3) << type; + u64 phys; + + phys = pgt->vinst; + phys |= 0x01; /* present */ + phys |= (type == 12) ? 
0x02 : 0x00; /* 4KiB pages */
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ phys |= 0x30;
+ }
+
+ if (coverage <= 32 * 1024 * 1024)
+ phys |= 0x60;
+ else if (coverage <= 64 * 1024 * 1024)
+ phys |= 0x40;
+ else if (coverage < 128 * 1024 * 1024)
+ phys |= 0x20;
+
+ nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+ nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
+void
+nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde)
+{
+ nv_wo32(pgd, (pde * 8) + 0, 0x00000000);
+ nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe);
+}
+
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u64 phys, u32 memtype, u32 target)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
+ phys |= 1; /* present */
+ phys |= (u64)memtype << 40;
+
+ /* IGPs don't have real VRAM, re-target to stolen system memory */
+ if (target == 0 && dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ target = 3;
+ }
+
+ phys |= target << 4;
+
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= (1 << 6);
+
+ if (!(vma->access & NV_MEM_ACCESS_WO))
+ phys |= (1 << 3);
+
+ return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+ u32 block, i;
+
+ phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ pte <<= 3;
+ cnt <<= 3;
+
+ while (cnt) {
+ u32 offset_h = upper_32_bits(phys);
+ u32 offset_l = lower_32_bits(phys);
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 3);
+ if (cnt >= block && !(pte & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
+
+ phys += block << (vma->node->type - 3);
+ cnt -= block;
+
+ while (block) {
+ nv_wo32(pgt, pte + 0, offset_l);
+ nv_wo32(pgt, pte + 4, offset_h);
+ pte += 8;
+ block -= 8;
+ }
+ }
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u32 pte, dma_addr_t *list, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+ pinstmem->flush(vm->dev);
+
+ /* BAR */
+ if (vm != dev_priv->chan_vm) {
+ nv50_vm_flush_engine(vm->dev, 6);
+ return;
+ }
+
+ pfifo->tlb_flush(vm->dev);
+
+ if (atomic_read(&vm->pgraph_refs))
+ pgraph->tlb_flush(vm->dev);
+ if (atomic_read(&vm->pcrypt_refs))
+ pcrypt->tlb_flush(vm->dev);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+ nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+ if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
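The PTE layout assembled by nv50_vm_addr() in the hunk above is easier to see written out flat. The following stand-alone sketch is an editorial illustration, not part of the merge: the function name and field comments are our own labels, and the IGP re-targeting branch is omitted.

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as nv50_vm_addr() above; the names are illustrative. */
static uint64_t
nv50_pte_sketch(uint64_t phys, uint32_t memtype, uint32_t target,
                int read_only, int allow_sysram_access)
{
	uint64_t pte = phys;

	pte |= 1;                       /* bit 0: page present */
	pte |= (uint64_t)memtype << 40; /* bits 40+: storage/comp type */
	pte |= (uint64_t)target << 4;   /* 0 = VRAM, 2/3 = system memory */
	if (allow_sysram_access)
		pte |= 1ULL << 6;       /* NV_MEM_ACCESS_SYS in the patch */
	if (read_only)
		pte |= 1ULL << 3;       /* set when write access is absent */
	return pte;
}

int main(void)
{
	/* A writable 4KiB VRAM page at physical offset 0x20000. */
	printf("pte = 0x%016llx\n",
	       (unsigned long long)nv50_pte_sketch(0x20000, 0, 0, 0, 0));
	return 0;
}

nv50_vm_map() then writes the low and high halves of such a value into each 8-byte PTE slot; the (i << 7) field it folds into offset_l records that 1 << i consecutive pages belong to one physically contiguous block.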
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 000000000000..47489ed0a5a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static int types[0x80] = {
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+ if (likely(type < sizeof(types) && types[type]))
+ return true;
+ return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *this;
+ struct nouveau_vram *vram;
+
+ vram = *pvram;
+ *pvram = NULL;
+ if (unlikely(vram == NULL))
+ return;
+
+ mutex_lock(&mm->mutex);
+ while (!list_empty(&vram->regions)) {
+ this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+
+ list_del(&this->rl_entry);
+ nouveau_mm_put(mm, this);
+ }
+ mutex_unlock(&mm->mutex);
+
+ kfree(vram);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
+ if (!types[type])
+ return -EINVAL;
+ size >>= 12;
+ align >>= 12;
+ size_nc >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); + vram->offset = (u64)r->offset << 12; + *pvram = vram; + return 0; +} + +static u32 +nv50_vram_rblock(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i, parts, colbits, rowbitsa, rowbitsb, banks; + u64 rowsize, predicted; + u32 r0, r4, rt, ru, rblock_size; + + r0 = nv_rd32(dev, 0x100200); + r4 = nv_rd32(dev, 0x100204); + rt = nv_rd32(dev, 0x100250); + ru = nv_rd32(dev, 0x001540); + NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); + + for (i = 0, parts = 0; i < 8; i++) { + if (ru & (0x00010000 << i)) + parts++; + } + + colbits = (r4 & 0x0000f000) >> 12; + rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; + rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; + banks = ((r4 & 0x01000000) ? 8 : 4); + + rowsize = parts * banks * (1 << colbits) * 8; + predicted = rowsize << rowbitsa; + if (r0 & 0x00000004) + predicted += rowsize << rowbitsb; + + if (predicted != dev_priv->vram_size) { + NV_WARN(dev, "memory controller reports %dMiB VRAM\n", + (u32)(dev_priv->vram_size >> 20)); + NV_WARN(dev, "we calculated %dMiB VRAM\n", + (u32)(predicted >> 20)); + } + + rblock_size = rowsize; + if (rt & 1) + rblock_size *= 3; + + NV_DEBUG(dev, "rblock %d bytes\n", rblock_size); + return rblock_size; +} + +int +nv50_vram_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + dev_priv->vram_size = nv_rd32(dev, 0x10020c); + dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; + dev_priv->vram_size &= 0xffffffff00ULL; + + switch (dev_priv->chipset) { + case 0xaa: + case 0xac: + case 0xaf: + dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; + dev_priv->vram_rblock_size = 4096; + break; + default: + dev_priv->vram_rblock_size = nv50_vram_rblock(dev); + break; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c new file mode 100644 index 000000000000..ec18ae1c3886 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -0,0 +1,140 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
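For reference, the row-block arithmetic in nv50_vram_rblock() above reduces to this standalone sketch (inputs are the raw reads of 0x100204, 0x100250 and 0x001540; the helper name is ours, and the predicted-size sanity check against vram_size is omitted):

#include <stdint.h>

static uint32_t nv50_rblock_size(uint32_t r4, uint32_t rt, uint32_t ru)
{
	int i, parts = 0, colbits, banks;
	uint64_t rowsize;
	uint32_t rblock;

	for (i = 0; i < 8; i++) {	/* count enabled partitions */
		if (ru & (0x00010000 << i))
			parts++;
	}

	colbits = (r4 & 0x0000f000) >> 12;
	banks   = (r4 & 0x01000000) ? 8 : 4;

	/* bytes per row across all partitions */
	rowsize = (uint64_t)parts * banks * (1 << colbits) * 8;

	rblock = (uint32_t)rowsize;
	if (rt & 1)			/* 0x100250 bit 0 triples the block */
		rblock *= 3;
	return rblock;
}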
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_util.h" +#include "nouveau_vm.h" + +static void nv84_crypt_isr(struct drm_device *); + +int +nv84_crypt_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramin = chan->ramin; + int ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + ret = nouveau_gpuobj_new(dev, chan, 256, 0, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, + &chan->crypt_ctx); + if (ret) + return ret; + + nv_wo32(ramin, 0xa0, 0x00190000); + nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff); + nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst); + nv_wo32(ramin, 0xac, 0); + nv_wo32(ramin, 0xb0, 0); + nv_wo32(ramin, 0xb4, 0); + + dev_priv->engine.instmem.flush(dev); + atomic_inc(&chan->vm->pcrypt_refs); + return 0; +} + +void +nv84_crypt_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + u32 inst; + + if (!chan->crypt_ctx) + return; + + inst = (chan->ramin->vinst >> 12); + inst |= 0x80000000; + + /* mark context as invalid if still on the hardware, not + * doing this causes issues the next time PCRYPT is used, + * unsurprisingly :) + */ + nv_wr32(dev, 0x10200c, 0x00000000); + if (nv_rd32(dev, 0x102188) == inst) + nv_mask(dev, 0x102188, 0x80000000, 0x00000000); + if (nv_rd32(dev, 0x10218c) == inst) + nv_mask(dev, 0x10218c, 0x80000000, 0x00000000); + nv_wr32(dev, 0x10200c, 0x00000010); + + nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); + atomic_dec(&chan->vm->pcrypt_refs); +} + +void +nv84_crypt_tlb_flush(struct drm_device *dev) +{ + nv50_vm_flush_engine(dev, 0x0a); +} + +int +nv84_crypt_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; + + if (!pcrypt->registered) { + NVOBJ_CLASS(dev, 0x74c1, CRYPT); + pcrypt->registered = true; + } + + nv_mask(dev, 0x000200, 0x00004000, 0x00000000); + nv_mask(dev, 0x000200, 0x00004000, 0x00004000); + + nouveau_irq_register(dev, 14, nv84_crypt_isr); + nv_wr32(dev, 0x102130, 0xffffffff); + nv_wr32(dev, 0x102140, 0xffffffbf); + + nv_wr32(dev, 0x10200c, 0x00000010); + return 0; +} + +void +nv84_crypt_fini(struct drm_device *dev) +{ + nv_wr32(dev, 0x102140, 0x00000000); + nouveau_irq_unregister(dev, 14); +} + +static void +nv84_crypt_isr(struct drm_device *dev) +{ + u32 stat = nv_rd32(dev, 0x102130); + u32 mthd = nv_rd32(dev, 0x102190); + u32 data = nv_rd32(dev, 0x102194); + u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; + int show = nouveau_ratelimit(); + + if (show) { + NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n", + stat, mthd, data, inst); + } + + nv_wr32(dev, 0x102130, stat); + nv_wr32(dev, 0x10200c, 0x10); + + nv50_fb_vm_trap(dev, show, "PCRYPT"); +} diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 13a0f78a9088..39232085193d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -26,67 +26,89 @@ #include "nouveau_drv.h" +struct nvc0_gpuobj_node { + struct nouveau_bo *vram; + struct drm_mm_node *ramin; + u32 align; +}; + int -nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - uint32_t *size) +nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { + struct drm_device *dev = gpuobj->dev; + struct nvc0_gpuobj_node *node = NULL; int ret; - *size = ALIGN(*size, 4096); - if (*size == 0) - return 
-EINVAL; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->align = align; - ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, - true, false, &gpuobj->im_backing); + ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, + 0, 0x0000, true, false, &node->vram); if (ret) { NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); return ret; } - ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); - nouveau_bo_ref(NULL, &gpuobj->im_backing); + nouveau_bo_ref(NULL, &node->vram); return ret; } - gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; + gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; + gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; + gpuobj->node = node; return 0; } void -nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nvc0_instmem_put(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_gpuobj_node *node; - if (gpuobj && gpuobj->im_backing) { - if (gpuobj->im_bound) - dev_priv->engine.instmem.unbind(dev, gpuobj); - nouveau_bo_unpin(gpuobj->im_backing); - nouveau_bo_ref(NULL, &gpuobj->im_backing); - gpuobj->im_backing = NULL; - } + node = gpuobj->node; + gpuobj->node = NULL; + + nouveau_bo_unpin(node->vram); + nouveau_bo_ref(NULL, &node->vram); + kfree(node); } int -nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nvc0_instmem_map(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t pte, pte_end; - uint64_t vram; - - if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) - return -EINVAL; - - NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", - gpuobj->im_pramin->start, gpuobj->im_pramin->size); - - pte = gpuobj->im_pramin->start >> 12; - pte_end = (gpuobj->im_pramin->size >> 12) + pte; + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct nvc0_gpuobj_node *node = gpuobj->node; + struct drm_device *dev = gpuobj->dev; + struct drm_mm_node *ramin = NULL; + u32 pte, pte_end; + u64 vram; + + do { + if (drm_mm_pre_get(&dev_priv->ramin_heap)) + return -ENOMEM; + + spin_lock(&dev_priv->ramin_lock); + ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, + node->align, 0); + if (ramin == NULL) { + spin_unlock(&dev_priv->ramin_lock); + return -ENOMEM; + } + + ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); + spin_unlock(&dev_priv->ramin_lock); + } while (ramin == NULL); + + pte = (ramin->start >> 12) << 1; + pte_end = ((ramin->size >> 12) << 1) + pte; vram = gpuobj->vinst; NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", - gpuobj->im_pramin->start, pte, pte_end); + ramin->start, pte, pte_end); NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); while (pte < pte_end) { @@ -103,30 +125,35 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) nv_wr32(dev, 0x100cbc, 0x80000005); } - gpuobj->im_bound = 1; + node->ramin = ramin; + gpuobj->pinst = ramin->start; return 0; } -int -nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +void +nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t pte, pte_end; + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct nvc0_gpuobj_node *node = gpuobj->node; + u32 pte, pte_end; - if 
(gpuobj->im_bound == 0) - return -EINVAL; + if (!node->ramin || !dev_priv->ramin_available) + return; + + pte = (node->ramin->start >> 12) << 1; + pte_end = ((node->ramin->size >> 12) << 1) + pte; - pte = gpuobj->im_pramin->start >> 12; - pte_end = (gpuobj->im_pramin->size >> 12) + pte; while (pte < pte_end) { - nv_wr32(dev, 0x702000 + (pte * 8), 0); - nv_wr32(dev, 0x702004 + (pte * 8), 0); + nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0); + nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0); pte++; } - dev_priv->engine.instmem.flush(dev); + dev_priv->engine.instmem.flush(gpuobj->dev); - gpuobj->im_bound = 0; - return 0; + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(node->ramin); + node->ramin = NULL; + spin_unlock(&dev_priv->ramin_lock); } void diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h index 881f8a585613..fe0f253089ac 100644 --- a/drivers/gpu/drm/nouveau/nvreg.h +++ b/drivers/gpu/drm/nouveau/nvreg.h @@ -153,7 +153,8 @@ #define NV_PCRTC_START 0x00600800 #define NV_PCRTC_CONFIG 0x00600804 # define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) -# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) +# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0) +# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) #define NV_PCRTC_CURSOR_CONFIG 0x00600810 # define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) # define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 6cae4f2028d2..e97e6f842699 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -65,10 +65,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ - evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o + evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ + radeon_trace_points.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o radeon-$(CONFIG_ACPI) += radeon_acpi.o obj-$(CONFIG_DRM_RADEON)+= radeon.o + +CFLAGS_radeon_trace_points.o := -I$(src)
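The reworked nvc0 instmem map/unmap paths above address the page table at 0x702000 in 8-byte entries, two per 4KiB page of PRAMIN (that is the << 1 on the page number). A minimal sketch of the index math, assuming nothing beyond what the patch itself writes (the helper name is ours):

#include <stdint.h>

/* First and last MMIO offsets touched for a PRAMIN allocation that
 * starts at byte offset 'start' and spans 'size' bytes (4KiB aligned).
 */
static void nvc0_pramin_pte_range(uint64_t start, uint64_t size,
                                  uint32_t *first, uint32_t *last)
{
	uint32_t pte     = (start >> 12) << 1;        /* two entries per page */
	uint32_t pte_end = ((size >> 12) << 1) + pte; /* one past the end */

	*first = 0x702000 + pte * 8;
	*last  = 0x702000 + (pte_end - 1) * 8;
}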
\ No newline at end of file diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h index c714179d1bfa..c61c3fe9fb98 100644 --- a/drivers/gpu/drm/radeon/ObjectID.h +++ b/drivers/gpu/drm/radeon/ObjectID.h @@ -37,6 +37,8 @@ #define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 #define GRAPH_OBJECT_TYPE_ROUTER 0x4 /* deleted */ +#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6 +#define GRAPH_OBJECT_TYPE_GENERIC 0x7 /****************************************************/ /* Encoder Object ID Definition */ @@ -64,6 +66,9 @@ #define ENCODER_OBJECT_ID_VT1623 0x10 #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 +#define ENCODER_OBJECT_ID_ALMOND 0x22 +#define ENCODER_OBJECT_ID_TRAVIS 0x23 +#define ENCODER_OBJECT_ID_NUTMEG 0x22 /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 @@ -108,6 +113,7 @@ #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 +#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 /* deleted */ @@ -124,6 +130,7 @@ #define GENERIC_OBJECT_ID_GLSYNC 0x01 #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 #define GENERIC_OBJECT_ID_MXM_OPM 0x03 +#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin /****************************************************/ /* Graphics Object ENUM ID Definition */ @@ -360,6 +367,26 @@ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) +#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) + +#define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) + +#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) + +#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) + +#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT) + /****************************************************/ /* Connector Object ID definition - Shared with BIOS */ /****************************************************/ @@ -421,6 +448,14 @@ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) +#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) + +#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) + #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) @@ -512,6 +547,7 @@ #define CONNECTOR_7PIN_DIN_ENUM_ID1 ( 
GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) + #define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) @@ -593,6 +629,14 @@ GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC +#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) + +#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) + /****************************************************/ /* Router Object ID definition - Shared with BIOS */ /****************************************************/ @@ -621,6 +665,10 @@ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) +#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT) + /****************************************************/ /* Object Cap definition - Shared with BIOS */ /****************************************************/ diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index fe359a239df3..58a0cd02c0a2 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -73,8 +73,18 @@ #define ATOM_PPLL1 0 #define ATOM_PPLL2 1 #define ATOM_DCPLL 2 +#define ATOM_PPLL0 2 +#define ATOM_EXT_PLL1 8 +#define ATOM_EXT_PLL2 9 +#define ATOM_EXT_CLOCK 10 #define ATOM_PPLL_INVALID 0xFF +#define ENCODER_REFCLK_SRC_P1PLL 0 +#define ENCODER_REFCLK_SRC_P2PLL 1 +#define ENCODER_REFCLK_SRC_DCPLL 2 +#define ENCODER_REFCLK_SRC_EXTCLK 3 +#define ENCODER_REFCLK_SRC_INVALID 0xFF + #define ATOM_SCALER1 0 #define ATOM_SCALER2 1 @@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER /*Image can't be updated, while Driver needs to carry the new table! */ }ATOM_COMMON_TABLE_HEADER; +/****************************************************************************/ +// Structure stores the ROM header. 
+/****************************************************************************/ typedef struct _ATOM_ROM_HEADER { ATOM_COMMON_TABLE_HEADER sHeader; @@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER #define USHORT void* #endif +/****************************************************************************/ +// Structures used in Command.mtb +/****************************************************************************/ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON @@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange #define HPDInterruptService ReadHWAssistedI2CStatus #define EnableVGA_Access GetSCLKOverMCLKRatio +#define GetDispObjectInfo EnableYUV typedef struct _ATOM_MASTER_COMMAND_TABLE { @@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER /****************************************************************************/ #define COMPUTE_MEMORY_PLL_PARAM 1 #define COMPUTE_ENGINE_PLL_PARAM 2 +#define ADJUST_MC_SETTING_PARAM 3 + +/****************************************************************************/ +// Structures used by AdjustMemoryControllerTable +/****************************************************************************/ +typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ +{ +#if ATOM_BIG_ENDIAN + ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block + ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] + ULONG ulClockFreq:24; +#else + ULONG ulClockFreq:24; + ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] + ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block +#endif +}ATOM_ADJUST_MEMORY_CLOCK_FREQ; +#define POINTER_RETURN_FLAG 0x80 typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS { @@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 #endif }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; +typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 +{ + union + { + ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter + ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter + }; + UCHAR ucRefDiv; //Output Parameter + UCHAR ucPostDiv; //Output Parameter + union + { + UCHAR ucCntlFlag; //Output Flags + UCHAR ucInputFlag; //Input Flags. 
ucInputFlag[0] - Strobe(1)/Performance(0) mode + }; + UCHAR ucReserved; +}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5; + +// ucInputFlag +#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode + typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER { ATOM_COMPUTE_CLOCK_FREQ ulClock; @@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 +#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02 #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 #define ATOM_ENCODER_CONFIG_LINKA 0x00 #define ATOM_ENCODER_CONFIG_LINKB 0x04 @@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS #define ATOM_ENCODER_MODE_TV 13 #define ATOM_ENCODER_MODE_CV 14 #define ATOM_ENCODER_MODE_CRT 15 +#define ATOM_ENCODER_MODE_DVO 16 +#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2 +#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2 typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 { @@ -661,6 +720,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a +#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b #define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c #define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d @@ -671,24 +731,34 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 +//ucTableFormatRevision=1 +//ucTableContentRevision=3 // Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 { #if ATOM_BIG_ENDIAN UCHAR ucReserved1:1; - UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) UCHAR ucReserved:3; UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz #else UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz UCHAR ucReserved:3; - UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) UCHAR ucReserved1:1; #endif }ATOM_DIG_ENCODER_CONFIG_V3; +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 - +#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00 +#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10 +#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20 +#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30 +#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40 +#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50 typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 { @@ -707,6 +777,56 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 UCHAR ucReserved; }DIG_ENCODER_CONTROL_PARAMETERS_V3; +//ucTableFormatRevision=1 +//ucTableContentRevision=4 +// start from NI +// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver +typedef struct _ATOM_DIG_ENCODER_CONFIG_V4 +{ +#if ATOM_BIG_ENDIAN + UCHAR ucReserved1:1; + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) + UCHAR 
ucReserved:2; + UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version +#else + UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version + UCHAR ucReserved:2; + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) + UCHAR ucReserved1:1; +#endif +}ATOM_DIG_ENCODER_CONFIG_V4; + +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02 +#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70 +#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00 +#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10 +#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20 +#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30 +#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40 +#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50 + +typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4 +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + union{ + ATOM_DIG_ENCODER_CONFIG_V4 acConfig; + UCHAR ucConfig; + }; + UCHAR ucAction; + UCHAR ucEncoderMode; + // =0: DP encoder + // =1: LVDS encoder + // =2: DVI encoder + // =3: HDMI encoder + // =4: SDVO encoder + // =5: DP audio + UCHAR ucLaneNum; // how many lanes to enable + UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP + UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version +}DIG_ENCODER_CONTROL_PARAMETERS_V4; // define ucBitPerColor: #define PANEL_BPC_UNDEFINE 0x00 @@ -893,6 +1013,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3 #endif }ATOM_DIG_TRANSMITTER_CONFIG_V3; + typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 { union @@ -936,6 +1057,149 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF + +/****************************************************************************/ +// Structures used by UNIPHYTransmitterControlTable V1.4 +// ASIC Families: NI +// ucTableFormatRevision=1 +// ucTableContentRevision=4 +/****************************************************************************/ +typedef struct _ATOM_DP_VS_MODE_V4 +{ + UCHAR ucLaneSel; + union + { + UCHAR ucLaneSet; + struct { +#if ATOM_BIG_ENDIAN + UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 + UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level + UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level +#else + UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level + UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level + UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 +#endif + }; + }; +}ATOM_DP_VS_MODE_V4; + +typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4 +{ +#if ATOM_BIG_ENDIAN + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) + UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. 
when fDualLinkConnector=1, it means master link of dual link is B or D or F + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector +#else + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F + UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) +#endif +}ATOM_DIG_TRANSMITTER_CONFIG_V4; + +typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 +{ + union + { + USHORT usPixelClock; // in 10KHz; for bios convenient + USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h + ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode Redefined comparing to previous version + }; + union + { + ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig; + UCHAR ucConfig; + }; + UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX + UCHAR ucLaneNum; + UCHAR ucReserved[3]; +}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4; + +//ucConfig +//Bit0 +#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01 +//Bit1 +#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02 +//Bit2 +#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04 +#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00 +#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04 +// Bit3 +#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08 +#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00 +#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08 +// Bit5:4 +#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30 +#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00 +#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10 +#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4 +#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed comparing to V3 +// Bit7:6 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF + + +/****************************************************************************/ +// Structures used by ExternalEncoderControlTable V1.3 +// ASIC Families: Evergreen, Llano, NI +// ucTableFormatRevision=1 +// ucTableContentRevision=3 +/****************************************************************************/ + +typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 +{ + union{ + USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT + USHORT usConnectorId; // connector id, valid when ucAction = INIT + }; + UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT + UCHAR ucAction; // + UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT + UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT + UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= 
DP + UCHAR ucReserved; +}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3; + +// ucAction +#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00 +#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11 +#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12 + +// ucConfig +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20 + +typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 +{ + EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder; + ULONG ulReserved[2]; +}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3; + + /****************************************************************************/ // Structures used by DAC1OuputControlTable // DAC2OuputControlTable @@ -1142,6 +1406,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2 #define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 + typedef struct _PIXEL_CLOCK_PARAMETERS_V3 { USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) @@ -1202,6 +1467,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5 #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 +typedef struct _CRTC_PIXEL_CLOCK_FREQ +{ +#if ATOM_BIG_ENDIAN + ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to + // drive the pixel clock. not used for DCPLL case. + ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. + // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. +#else + ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. + // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. + ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to + // drive the pixel clock. not used for DCPLL case. +#endif +}CRTC_PIXEL_CLOCK_FREQ; + +typedef struct _PIXEL_CLOCK_PARAMETERS_V6 +{ + union{ + CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency + ULONG ulDispEngClkFreq; // dispclk frequency + }; + USHORT usFbDiv; // feedback divider integer part. + UCHAR ucPostDiv; // post divider. + UCHAR ucRefDiv; // Reference divider + UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL + UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, + // indicate which graphic encoder will be used. + UCHAR ucEncoderMode; // Encoder mode: + UCHAR ucMiscInfo; // bit[0]= Force program PPLL + // bit[1]= when VGA timing is used. + // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp + // bit[4]= RefClock source for PPLL. + // =0: XTLAIN( default mode ) + // =1: other external clock source, which is pre-defined + // by VBIOS depend on the feature required. + // bit[7:5]: reserved. 
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) + +}PIXEL_CLOCK_PARAMETERS_V6; + +#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01 +#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02 +#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c +#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00 +#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04 +#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08 +#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c +#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10 + typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 { PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; @@ -1241,10 +1555,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 { USHORT usPixelClock; // target pixel clock - UCHAR ucTransmitterID; // transmitter id defined in objectid.h + UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX - UCHAR ucReserved[3]; + UCHAR ucExtTransmitterID; // external encoder id. + UCHAR ucReserved[2]; }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; // usDispPllConfig v1.2 for RoadRunner @@ -1358,6 +1673,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS /**************************************************************************/ #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS + /****************************************************************************/ // Structures used by PowerConnectorDetectionTable /****************************************************************************/ @@ -1438,6 +1754,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 +// Used by DCE5.0 + typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 +{ + USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0 + UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread. + // Bit[1]: 1-Ext. 0-Int. 
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL + // Bits[7:4] reserved + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8] + USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC +}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3; + +#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00 +#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01 +#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02 +#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c +#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00 +#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04 +#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08 +#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF +#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0 +#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00 +#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8 + #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL /**************************************************************************/ @@ -1706,7 +2047,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES USHORT StandardVESA_Timing; // Only used by Bios USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 USHORT DAC_Info; // Will be obsolete from R600 - USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 + USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info USHORT TMDS_Info; // Will be obsolete from R600 USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 USHORT SupportedDevicesInfo; // Will be obsolete from R600 @@ -1736,12 +2077,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 }ATOM_MASTER_LIST_OF_DATA_TABLES; +// For backward compatible +#define LVDS_Info LCD_Info + typedef struct _ATOM_MASTER_DATA_TABLE { ATOM_COMMON_TABLE_HEADER sHeader; ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; }ATOM_MASTER_DATA_TABLE; + /****************************************************************************/ // Structure used in MultimediaCapabilityInfoTable /****************************************************************************/ @@ -1776,6 +2121,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) }ATOM_MULTIMEDIA_CONFIG_INFO; + /****************************************************************************/ // Structures used in FirmwareInfoTable /****************************************************************************/ @@ -2031,8 +2377,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1 UCHAR ucReserved4[3]; }ATOM_FIRMWARE_INFO_V2_1; +//the structure below to be used from NI +//ucTableFormatRevision=2 +//ucTableContentRevision=2 +typedef struct _ATOM_FIRMWARE_INFO_V2_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulReserved[2]; + ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit* + ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit* + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ? + ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage. 
+ UCHAR ucReserved3; //Was ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + USHORT usBootUpVDDCVoltage; //In MV unit + USHORT usLcdMinPixelClockPLL_Output; // In MHz unit + USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit + ULONG ulReserved4; //Was ulAsicMaximumVoltage + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input + ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input + ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output + USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usCoreReferenceClock; //In 10Khz unit + USHORT usMemoryReferenceClock; //In 10Khz unit + USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock + UCHAR ucMemoryModule_ID; //Indicate what is the board design + UCHAR ucReserved9[3]; + USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; + USHORT usReserved12; + ULONG ulReserved10[3]; // New added comparing to previous version +}ATOM_FIRMWARE_INFO_V2_2; -#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1 +#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2 /****************************************************************************/ // Structures used in IntegratedSystemInfoTable @@ -2212,7 +2597,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi ucDockingPinBit: which bit in this register to read the pin status; ucDockingPinPolarity:Polarity of the pin when docked; -ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 +ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0 usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. @@ -2250,6 +2635,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep usMinDownStreamHTLinkWidth: same as above. 
*/ +// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition +#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4 + +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this deff reflects max defined CPU code #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 @@ -2778,8 +3171,88 @@ typedef struct _ATOM_LVDS_INFO_V12 #define PANEL_RANDOM_DITHER 0x80 #define PANEL_RANDOM_DITHER_MASK 0x80 +#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this + +/****************************************************************************/ +// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12 +// ASIC Families: NI +// ucTableFormatRevision=1 +// ucTableContentRevision=3 +/****************************************************************************/ +typedef struct _ATOM_LCD_INFO_V13 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT sLCDTiming; + USHORT usExtInfoTableOffset; + USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. + ULONG ulReserved0; + UCHAR ucLCD_Misc; // Reorganized in V13 + // Bit0: {=0:single, =1:dual}, + // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB}, + // Bit3:2: {Grey level} + // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) + // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it? + UCHAR ucPanelDefaultRefreshRate; + UCHAR ucPanelIdentification; + UCHAR ucSS_Id; + USHORT usLCDVenderID; + USHORT usLCDProductID; + UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13 + // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own + // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED + // Bit2: a quick reference whether an embadded panel (LCD1 ) is LVDS (0) or eDP (1) + // Bit7-3: Reserved + UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable + USHORT usBacklightPWM; // Backlight PWM in Hz. 
New in _V13 + + UCHAR ucPowerSequenceDIGONtoDE_in4Ms; + UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms; + UCHAR ucPowerSequenceDEtoDIGON_in4Ms; + UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms; + + UCHAR ucOffDelay_in4Ms; + UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms; + UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms; + UCHAR ucReserved1; + + ULONG ulReserved[4]; +}ATOM_LCD_INFO_V13; + +#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13 + +//Definitions for ucLCD_Misc +#define ATOM_PANEL_MISC_V13_DUAL 0x00000001 +#define ATOM_PANEL_MISC_V13_FPDI 0x00000002 +#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C +#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2 +#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70 +#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10 +#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20 + +//Color Bit Depth definition in EDID V1.4 @BYTE 14h +//Bit 6 5 4 + // 0 0 0 - Color bit depth is undefined + // 0 0 1 - 6 Bits per Primary Color + // 0 1 0 - 8 Bits per Primary Color + // 0 1 1 - 10 Bits per Primary Color + // 1 0 0 - 12 Bits per Primary Color + // 1 0 1 - 14 Bits per Primary Color + // 1 1 0 - 16 Bits per Primary Color + // 1 1 1 - Reserved + +//Definitions for ucLCDPanel_SpecialHandlingCap: + +//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. +//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL +#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID no change comparing to previous version + +//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together +//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static +//refresh rate switch by SW. This is only valid from ATOM_LVDS_INFO_V12 +#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED no change comparing to previous version -#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 +//Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP. 
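The EDID V1.4 table quoted above defines the ucLCD_Misc color-depth field in bits [6:4]; a small decode sketch under exactly that table (the helper name is ours):

/* Bits per primary color from ucLCD_Misc, per the EDID V1.4 byte 14h
 * table above; returns 0 for the undefined (000) and reserved (111)
 * encodings.
 */
static int lcd_misc_bpc(unsigned char lcd_misc)
{
	unsigned char depth = (lcd_misc & 0x70) >> 4; /* ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK */

	if (depth == 0 || depth == 7)
		return 0;
	return 4 + 2 * depth;	/* 001 -> 6bpc ... 110 -> 16bpc */
}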
+#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version typedef struct _ATOM_PATCH_RECORD_MODE { @@ -2944,9 +3417,9 @@ typedef struct _ATOM_DPCD_INFO #define MAX_DTD_MODE_IN_VRAM 6 #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) #define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) -#define DFP_ENCODER_TYPE_OFFSET 0x80 -#define DP_ENCODER_LANE_NUM_OFFSET 0x84 -#define DP_ENCODER_LINK_RATE_OFFSET 0x88 +//20 bytes for Encoder Type and DPCD in STD EDID area +#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20) +#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 ) #define ATOM_HWICON1_SURFACE_ADDR 0 #define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) @@ -2997,14 +3470,16 @@ typedef struct _ATOM_DPCD_INFO #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) -#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 +#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024) +#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512 //The size below is in Kb! #define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) +#define ATOM_VRAM_RESERVE_V2_SIZE 32 + #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 @@ -3206,6 +3681,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 
}ATOM_DISPLAY_OBJECT_PATH; +typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH +{ + USHORT usDeviceTag; //supported device + USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH + USHORT usConnObjectId; //Connector Object ID + USHORT usGPUObjectId; //GPU ID + USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder +}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH; + typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE { UCHAR ucNumOfDispPath; @@ -3261,6 +3745,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset #define EXT_AUXDDC_LUTINDEX_7 7 #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) +//ucChannelMapping are defined as following +//for DP connector, eDP, DP to VGA/LVDS +//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING +{ +#if ATOM_BIG_ENDIAN + UCHAR ucDP_Lane3_Source:2; + UCHAR ucDP_Lane2_Source:2; + UCHAR ucDP_Lane1_Source:2; + UCHAR ucDP_Lane0_Source:2; +#else + UCHAR ucDP_Lane0_Source:2; + UCHAR ucDP_Lane1_Source:2; + UCHAR ucDP_Lane2_Source:2; + UCHAR ucDP_Lane3_Source:2; +#endif +}ATOM_DP_CONN_CHANNEL_MAPPING; + +//for DVI/HDMI, in dual link case, both links have to have same mapping. 
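Given the two-bits-per-lane layout described for ATOM_DP_CONN_CHANNEL_MAPPING above (Bit[1:0] selects the TX pin for DP_Lane0, and so on), extracting a lane's source pin from a raw ucChannelMapping byte is a one-liner (helper name ours):

/* GPU TX pin (0..3) feeding DP lane 'lane' (0..3). */
static int dp_lane_source(unsigned char channel_mapping, int lane)
{
	return (channel_mapping >> (lane * 2)) & 0x3;
}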
+//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING +{ +#if ATOM_BIG_ENDIAN + UCHAR ucDVI_CLK_Source:2; + UCHAR ucDVI_DATA0_Source:2; + UCHAR ucDVI_DATA1_Source:2; + UCHAR ucDVI_DATA2_Source:2; +#else + UCHAR ucDVI_DATA2_Source:2; + UCHAR ucDVI_DATA1_Source:2; + UCHAR ucDVI_DATA0_Source:2; + UCHAR ucDVI_CLK_Source:2; +#endif +}ATOM_DVI_CONN_CHANNEL_MAPPING; + typedef struct _EXT_DISPLAY_PATH { USHORT usDeviceTag; //A bit vector to show what devices are supported @@ -3269,7 +3794,13 @@ typedef struct _EXT_DISPLAY_PATH UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT USHORT usExtEncoderObjId; //external encoder object id - USHORT usReserved[3]; + union{ + UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping + ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping; + ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping; + }; + UCHAR ucReserved; + USHORT usReserved[2]; }EXT_DISPLAY_PATH; #define NUMBER_OF_UCHAR_FOR_GUID 16 @@ -3281,7 +3812,8 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. - UCHAR Reserved [7]; // for potential expansion + UCHAR uc3DStereoPinId; // use for eDP panel + UCHAR Reserved [6]; // for potential expansion }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; //Related definitions, all records are differnt but they have a commond header @@ -3311,10 +3843,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table #define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 +#define ATOM_ENCODER_CAP_RECORD_TYPE 20 //Must be updated when new record type is added,equal to that record definition! -#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE +#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE typedef struct _ATOM_I2C_RECORD { @@ -3441,6 +3974,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD UCHAR ucPadding[2]; }ATOM_ENCODER_DVO_CF_RECORD; +// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap +#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path + +typedef struct _ATOM_ENCODER_CAP_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + union { + USHORT usEncoderCap; + struct { +#if ATOM_BIG_ENDIAN + USHORT usReserved:15; // Bit1-15 may be defined for other capability in future + USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. +#else + USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. 
+ USHORT usReserved:15; // Bit1-15 may be defined for other capability in future +#endif + }; + }; +}ATOM_ENCODER_CAP_RECORD; + // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 @@ -3580,6 +4133,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL #define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI #define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage #define VOLTAGE_CONTROL_ID_DS4402 0x04 +#define VOLTAGE_CONTROL_ID_UP6266 0x05 +#define VOLTAGE_CONTROL_ID_SCORPIO 0x06 +#define VOLTAGE_CONTROL_ID_VT1556M 0x07 +#define VOLTAGE_CONTROL_ID_CHL822x 0x08 +#define VOLTAGE_CONTROL_ID_VT1586M 0x09 typedef struct _ATOM_VOLTAGE_OBJECT { @@ -3670,66 +4228,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO #define POWER_SENSOR_GPIO 0x01 #define POWER_SENSOR_I2C 0x02 +typedef struct _ATOM_CLK_VOLT_CAPABILITY +{ + ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table + ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz +}ATOM_CLK_VOLT_CAPABILITY; + +typedef struct _ATOM_AVAILABLE_SCLK_LIST +{ + ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz + USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK + USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK +}ATOM_AVAILABLE_SCLK_LIST; + +// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition +#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0] + +// this IntegrateSystemInfoTable is used for Liano/Ontario APU typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 { ATOM_COMMON_TABLE_HEADER sHeader; ULONG ulBootUpEngineClock; ULONG ulDentistVCOFreq; ULONG ulBootUpUMAClock; - ULONG ulReserved1[8]; + ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; ULONG ulBootUpReqDisplayVector; ULONG ulOtherDisplayMisc; ULONG ulGPUCapInfo; - ULONG ulReserved2[3]; + ULONG ulSB_MMIO_Base_Addr; + USHORT usRequestedPWMFreqInHz; + UCHAR ucHtcTmpLmt; + UCHAR ucHtcHystLmt; + ULONG ulMinEngineClock; ULONG ulSystemConfig; ULONG ulCPUCapInfo; - USHORT usMaxNBVoltage; - USHORT usMinNBVoltage; - USHORT usBootUpNBVoltage; - USHORT usExtDispConnInfoOffset; - UCHAR ucHtcTmpLmt; - UCHAR ucTjOffset; + USHORT usNBP0Voltage; + USHORT usNBP1Voltage; + USHORT usBootUpNBVoltage; + USHORT usExtDispConnInfoOffset; + USHORT usPanelRefreshRateRange; UCHAR ucMemoryType; UCHAR ucUMAChannelNumber; ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; ULONG ulCSR_M3_ARB_CNTL_UVD[10]; ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; - ULONG ulReserved3[42]; + ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; + ULONG ulGMCRestoreResetTime; + ULONG ulMinimumNClk; + ULONG ulIdleNClk; + ULONG ulDDR_DLL_PowerUpTime; + ULONG ulDDR_PLL_PowerUpTime; + USHORT usPCIEClkSSPercentage; + USHORT usPCIEClkSSType; + USHORT usLvdsSSPercentage; + USHORT usLvdsSSpreadRateIn10Hz; + USHORT usHDMISSPercentage; + USHORT usHDMISSpreadRateIn10Hz; + USHORT usDVISSPercentage; + USHORT usDVISSpreadRateIn10Hz; + ULONG ulReserved3[21]; ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; }ATOM_INTEGRATED_SYSTEM_INFO_V6; +// ulGPUCapInfo +#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 +#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08 + +// ulOtherDisplayMisc +#define 
INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
+
+
 /**********************************************************************************************************************
-// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
-//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
-//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
-//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
-//ulReserved1[8] Reserved by now, must be 0x0.
-//ulBootUpReqDisplayVector VBIOS boot up display IDs
-// ATOM_DEVICE_CRT1_SUPPORT 0x0001
-// ATOM_DEVICE_CRT2_SUPPORT 0x0010
-// ATOM_DEVICE_DFP1_SUPPORT 0x0008
-// ATOM_DEVICE_DFP6_SUPPORT 0x0040
-// ATOM_DEVICE_DFP2_SUPPORT 0x0080
-// ATOM_DEVICE_DFP3_SUPPORT 0x0200
-// ATOM_DEVICE_DFP4_SUPPORT 0x0400
-// ATOM_DEVICE_DFP5_SUPPORT 0x0800
-// ATOM_DEVICE_LCD1_SUPPORT 0x0002
-//ulOtherDisplayMisc Other display related flags, not defined yet.
-//ulGPUCapInfo TBD
-//ulReserved2[3] must be 0x0 for the reserved.
-//ulSystemConfig TBD
-//ulCPUCapInfo TBD
-//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usBootUpNBVoltage Boot up NB voltage in unit of mv.
-//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
-//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
-//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
-//ucUMAChannelNumber System memory channel numbers.
-//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
-//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
-//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
-//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it is 0, VBIOS uses the pre-defined bootup engine clock.
+ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock: System memory boot up clock frequency in 10kHz unit.
+sDISPCLK_Voltage: Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Liano/Ontario projects:
+ ATOM_DEVICE_CRT1_SUPPORT 0x0001
+ ATOM_DEVICE_CRT2_SUPPORT 0x0010
+ ATOM_DEVICE_DFP1_SUPPORT 0x0008
+ ATOM_DEVICE_DFP6_SUPPORT 0x0040
+ ATOM_DEVICE_DFP2_SUPPORT 0x0080
+ ATOM_DEVICE_DFP3_SUPPORT 0x0200
+ ATOM_DEVICE_DFP4_SUPPORT 0x0400
+ ATOM_DEVICE_DFP5_SUPPORT 0x0800
+ ATOM_DEVICE_LCD1_SUPPORT 0x0002
+ulOtherDisplayMisc: Other display related flags, not defined yet.
+ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+ =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+ bit[3]=0: Enable HW AUX mode detection logic
+ =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+ 1.
SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
+ VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+ changing BL using VBIOS function is functional in both driver and non-driver present environment,
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ 2. SW uses other means to control BL (like DPCD); this non-zero frequency serves as a flag only, indicating
+ that BL control from GPU is expected.
+ VBIOS will NOT set up PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+ Changing BL using VBIOS function could be functional in both driver and non-driver present environment, but
+ it's platform dependent,
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+ Threshold-on value to enter HTC_active state.
+ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
+ Used to calculate the threshold-off value to exit HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
+ =1: PCIE Power Gating Enabled
+ Bit[1]=0: DDR-DLL shut-down feature disabled.
+ 1: DDR-DLL shut-down feature enabled.
+ Bit[2]=0: DDR-PLL Power down feature disabled.
+ 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo: TBD
+usNBP0Voltage: VID for voltage on NB P0 State
+usNBP1Voltage: VID for voltage on NB P1 State
+usBootUpNBVoltage: Voltage index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+ to indicate a range.
+ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber: System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]: Arrays to provide the available list of SCLK and corresponding voltage, ordered from low to high.
+ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
+usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
+usLvdsSSPercentage: LVDS panel (not including eDP) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz: LVDS panel (not including eDP) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
**********************************************************************************************************************/

/**************************************************************************/
@@ -3790,6 +4439,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
 #define ASIC_INTERNAL_SS_ON_LVDS 6
 #define ASIC_INTERNAL_SS_ON_DP 7
 #define ASIC_INTERNAL_SS_ON_DCPLL 8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9

 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
 {
@@ -3903,6 +4553,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4

 //Byte aligned defintion for BIOS usage
 #define ATOM_S0_CRT1_MONOb0 0x01
@@ -4529,7 +5180,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{
 #define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
 #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
 #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-
+//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
+#define ACCESS_PLACEHOLDER 0x80

 typedef struct _ATOM_MC_INIT_PARAM_TABLE
 {
@@ -4554,6 +5206,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
 #define _32Mx32 0x33
 #define _64Mx8 0x41
 #define _64Mx16 0x42
+#define _64Mx32 0x43
+#define _128Mx8 0x51
+#define _128Mx16 0x52
+#define _256Mx8 0x61

 #define SAMSUNG 0x1
 #define INFINEON 0x2
@@ -4569,10 +5225,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
 #define QIMONDA INFINEON
 #define PROMOS MOSEL
 #define KRETON INFINEON
+#define ELIXIR NANYA

 /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////

-#define UCODE_ROM_START_ADDRESS 0x1c000
+#define UCODE_ROM_START_ADDRESS 0x1b800
 #define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode

 //uCode block header for reference
@@ -4903,7 +5560,34 @@ typedef struct _ATOM_VRAM_MODULE_V6
 ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
 }ATOM_VRAM_MODULE_V6;
-
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+ UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+ UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
+ UCHAR ucVREFI; // Not used.
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
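/* Illustration (sketch; helper names are assumed, not part of this header):
 * the spread-spectrum fields described above use 0.01% units for percentages
 * and 10Hz units for spread rates, so conversions are simple scalings.
 * 0.01% equals 100 ppm, e.g. usPCIEClkSSPercentage = 100 means 1% = 10000 ppm.
 *
 *   static inline ULONG ss_percentage_to_ppm(USHORT ss)  { return (ULONG)ss * 100; }
 *   static inline ULONG ss_rate_to_hz(USHORT rate10hz)   { return (ULONG)rate10hz * 10; }
 */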
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+// Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+ UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here it is hardcoded per memory
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ char strMemPNString[20]; // part number, ends with '0'.
+}ATOM_VRAM_MODULE_V7;

 typedef struct _ATOM_VRAM_INFO_V2
 {
@@ -4942,6 +5626,20 @@ typedef struct _ATOM_VRAM_INFO_V4
 // ATOM_INIT_REG_BLOCK aMemAdjust;
 }ATOM_VRAM_INFO_V4;

+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usReserved[4];
+ UCHAR ucNumOfVRAMModule; // indicates the number of VRAM modules
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicates the ATOM_VRAM_MODULE version
+ UCHAR ucReserved;
+ ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
+
 typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
 {
 ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5182,6 +5880,16 @@ typedef struct _ASIC_TRANSMITTER_INFO
 UCHAR ucReserved;
 }ASIC_TRANSMITTER_INFO;

+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
+
 typedef struct _ASIC_ENCODER_INFO
 {
 UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS
 /* /obselete */
 #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+ USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucAuxId;
+ UCHAR ucAction;
+ UCHAR ucSinkType; // Input and output parameters.
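/* Sketch (variable names assumed; no such parser is defined in this header):
 * because ATOM_VRAM_MODULE_V7 carries its own usModuleSize, a reader of
 * ATOM_VRAM_INFO_HEADER_V2_1 should advance by that size rather than index
 * aVramInfo[] directly, since that array is declared only to reserve space:
 *
 *   const UCHAR *p = (const UCHAR *)hdr->aVramInfo;
 *   int i;
 *   for (i = 0; i < hdr->ucNumOfVRAMModule; i++) {
 *       const ATOM_VRAM_MODULE_V7 *mod = (const ATOM_VRAM_MODULE_V7 *)p;
 *       // inspect mod->ucMemoryType, mod->ucChannelNum, ...
 *       p += le16_to_cpu(mod->usModuleSize);
 *   }
 */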
+ UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION + UCHAR ucReserved[2]; +}DP_ENCODER_SERVICE_PARAMETERS_V2; + +typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2 +{ + DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam; + PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam; +}DP_ENCODER_SERVICE_PS_ALLOCATION_V2; + +// ucAction +#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01 +#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02 + + // DP_TRAINING_TABLE #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) @@ -5339,6 +6069,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2 #define SELECT_DCIO_IMPCAL 4 #define SELECT_DCIO_DIG 6 #define SELECT_CRTC_PIXEL_RATE 7 +#define SELECT_VGA_BLK 8 /****************************************************************************/ //Portion VI: Definitinos for vbios MC scratch registers that driver used @@ -5744,7 +6475,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 +#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen. +#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally +#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 + +// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. +// We probably should reserve the bit 0x80 for this use. +// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). +// The driver can pick the correct internal controller based on the ASIC. + #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller +#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller typedef struct _ATOM_PPLIB_STATE { @@ -5841,6 +6582,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 USHORT usExtendendedHeaderOffset; } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; +typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 +{ + ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; + ULONG ulGoldenPPID; // PPGen use only + ULONG ulGoldenRevision; // PPGen use only + USHORT usVddcDependencyOnSCLKOffset; + USHORT usVddciDependencyOnMCLKOffset; + USHORT usVddcDependencyOnMCLKOffset; + USHORT usMaxClockVoltageOnDCOffset; + USHORT usReserved[2]; +} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 +{ + ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; + ULONG ulTDPLimit; + ULONG ulNearTDPLimit; + ULONG ulSQRampingThreshold; + USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table + ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed. 
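/* Sketch (variable names assumed; assumes the base powerplay table's
 * usTableSize field is in scope): TABLE4 embeds TABLE3 and TABLE5 embeds
 * TABLE4, so a driver can treat a pointer to the base table as the newer
 * layout only after checking that the table is actually large enough:
 *
 *   if (le16_to_cpu(base->usTableSize) >= sizeof(ATOM_PPLIB_POWERPLAYTABLE5)) {
 *       const ATOM_PPLIB_POWERPLAYTABLE5 *t5 =
 *               (const ATOM_PPLIB_POWERPLAYTABLE5 *)base;
 *       // t5->ulTDPLimit, t5->usCACLeakageTableOffset, ... are valid here
 *   }
 */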
+ ULONG ulReserved; +} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; + //// ATOM_PPLIB_NONCLOCK_INFO::usClassification #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 @@ -5864,6 +6628,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 #define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 #define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2 +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 +#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 + //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 @@ -5896,9 +6664,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 #define ATOM_PPLIB_M3ARB_MASK 0x00060000 #define ATOM_PPLIB_M3ARB_SHIFT 17 +#define ATOM_PPLIB_ENABLE_DRR 0x00080000 + +// remaining 16 bits are reserved +typedef struct _ATOM_PPLIB_THERMAL_STATE +{ + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + UCHAR ucThermalAction; +}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE; + // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex +#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 +#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 typedef struct _ATOM_PPLIB_NONCLOCK_INFO { USHORT usClassification; @@ -5906,15 +6686,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO UCHAR ucMaxTemperature; ULONG ulCapsAndSettings; UCHAR ucRequiredPower; - UCHAR ucUnused1[3]; + USHORT usClassification2; + ULONG ulVCLK; + ULONG ulDCLK; + UCHAR ucUnused[5]; } ATOM_PPLIB_NONCLOCK_INFO; // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. // referenced from ATOM_PPLIB_STATE::ucClockStateIndices -#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 -#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 - typedef struct _ATOM_PPLIB_R600_CLOCK_INFO { USHORT usEngineClockLow; @@ -5985,6 +6765,93 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 +typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ + USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz + UCHAR ucEngineClockHigh; //clockfrequency >> 16. + UCHAR vddcIndex; //2-bit vddc index; + UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value + //please initalize to 0 + UCHAR rsv; + //please initalize to 0 + USHORT rsv1; + //please initialize to 0s + ULONG rsv2[2]; +}ATOM_PPLIB_SUMO_CLOCK_INFO; + + + +typedef struct _ATOM_PPLIB_STATE_V2 +{ + //number of valid dpm levels in this state; Driver uses it to calculate the whole + //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) + UCHAR ucNumDPMLevels; + + //a index to the array of nonClockInfos + UCHAR nonClockInfoIndex; + /** + * Driver will read the first ucNumDPMLevels in this array + */ + UCHAR clockInfoIndex[1]; +} ATOM_PPLIB_STATE_V2; + +typedef struct StateArray{ + //how many states we have + UCHAR ucNumEntries; + + ATOM_PPLIB_STATE_V2 states[1]; +}StateArray; + + +typedef struct ClockInfoArray{ + //how many clock levels we have + UCHAR ucNumEntries; + + //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO) + UCHAR ucEntrySize; + + //this is for Sumo + ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1]; +}ClockInfoArray; + +typedef struct NonClockInfoArray{ + + //how many non-clock levels we have. 
normally should be same as number of states + UCHAR ucNumEntries; + //sizeof(ATOM_PPLIB_NONCLOCK_INFO) + UCHAR ucEntrySize; + + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; +}NonClockInfoArray; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record +{ + USHORT usClockLow; + UCHAR ucClockHigh; + USHORT usVoltage; +}ATOM_PPLIB_Clock_Voltage_Dependency_Record; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_Clock_Voltage_Dependency_Table; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record +{ + USHORT usSclkLow; + UCHAR ucSclkHigh; + USHORT usMclkLow; + UCHAR ucMclkHigh; + USHORT usVddc; + USHORT usVddci; +}ATOM_PPLIB_Clock_Voltage_Limit_Record; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_Clock_Voltage_Limit_Table; + /**************************************************************************/ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4dc5b4714c5a..f7d7477daffb 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -40,6 +40,61 @@ static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); +void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; + u32 tmp; + + /* make sure flip is at vb rather than hb */ + tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); + tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; + WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); + + /* set pageflip to happen anywhere in vblank interval */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + + /* enable the pflip int */ + radeon_irq_kms_pflip_irq_get(rdev, crtc); +} + +void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) +{ + /* disable the pflip int */ + radeon_irq_kms_pflip_irq_put(rdev, crtc); +} + +u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); + + /* Lock the graphics update lock */ + tmp |= EVERGREEN_GRPH_UPDATE_LOCK; + WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + /* Wait for update_pending to go high. */ + while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); + DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; + WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* Return current update_pending status: */ + return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; +} + /* get temperature in millidegrees */ u32 evergreen_get_temp(struct radeon_device *rdev) { @@ -57,6 +112,14 @@ u32 evergreen_get_temp(struct radeon_device *rdev) return actual_temp * 1000; } +u32 sumo_get_temp(struct radeon_device *rdev) +{ + u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; + u32 actual_temp = (temp >> 1) & 0xff; + + return actual_temp * 1000; +} + void evergreen_pm_misc(struct radeon_device *rdev) { int req_ps_idx = rdev->pm.requested_power_state_index; @@ -888,31 +951,39 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); - save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); - save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (!(rdev->flags & RADEON_IS_IGP)) { + save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); + save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); + save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); + save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + } /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + } WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } 
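/* Aside on evergreen_page_flip() above (sketch, not part of this patch): the
 * unbounded while loop relies on EVERGREEN_GRPH_SURFACE_UPDATE_PENDING
 * latching shortly after the surface-address writes. A defensive variant
 * would bound the spin; the retry count and delay here are illustrative:
 *
 *   int retries = 1000;
 *   while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
 *            EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) && --retries)
 *       udelay(1);
 */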
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(D1VGA_CONTROL, 0); WREG32(D2VGA_CONTROL, 0); @@ -942,41 +1013,43 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + 
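/* Sketch (illustrative only, not what this patch does): the per-CRTC register
 * blocks repeated in evergreen_mc_stop()/evergreen_mc_resume() could equally
 * be written as a loop over an offset table, with the IGP check collapsing
 * into the loop bound, since Palm exposes only CRTC0/1:
 *
 *   static const u32 crtc_offsets[6] = {
 *       EVERGREEN_CRTC0_REGISTER_OFFSET, EVERGREEN_CRTC1_REGISTER_OFFSET,
 *       EVERGREEN_CRTC2_REGISTER_OFFSET, EVERGREEN_CRTC3_REGISTER_OFFSET,
 *       EVERGREEN_CRTC4_REGISTER_OFFSET, EVERGREEN_CRTC5_REGISTER_OFFSET,
 *   };
 *   int i, num_crtc = (rdev->flags & RADEON_IS_IGP) ? 2 : 6;
 *   for (i = 0; i < num_crtc; i++) {
 *       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 *              upper_32_bits(rdev->mc.vram_start));
 *       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 *              (u32)rdev->mc.vram_start);
 *   }
 */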
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + } WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); @@ -992,22 +1065,28 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + } WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); + } WREG32(EVERGREEN_CRTC_UPDATE_LOCK + 
EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } @@ -1055,6 +1134,12 @@ static void evergreen_mc_program(struct radeon_device *rdev) rdev->mc.vram_end >> 12); } WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + if (rdev->flags & RADEON_IS_IGP) { + tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; + tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; + tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20; + WREG32(MC_FUS_VM_FB_OFFSET, tmp); + } tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); @@ -1283,6 +1368,7 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, switch (rdev->family) { case CHIP_CEDAR: case CHIP_REDWOOD: + case CHIP_PALM: force_no_swizzle = false; break; case CHIP_CYPRESS: @@ -1382,6 +1468,43 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } +static void evergreen_program_channel_remap(struct radeon_device *rdev) +{ + u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; + + tmp = RREG32(MC_SHARED_CHMAP); + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + case 1: + case 2: + case 3: + default: + /* default mapping */ + mc_shared_chremap = 0x00fac688; + break; + } + + switch (rdev->family) { + case CHIP_HEMLOCK: + case CHIP_CYPRESS: + tcp_chan_steer_lo = 0x54763210; + tcp_chan_steer_hi = 0x0000ba98; + break; + case CHIP_JUNIPER: + case CHIP_REDWOOD: + case CHIP_CEDAR: + case CHIP_PALM: + default: + tcp_chan_steer_lo = 0x76543210; + tcp_chan_steer_hi = 0x0000ba98; + break; + } + + WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); + WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); + WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); +} + static void evergreen_gpu_init(struct radeon_device *rdev) { u32 cc_rb_backend_disable = 0; @@ -1493,6 +1616,27 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; break; + case CHIP_PALM: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 2; + rdev->config.evergreen.max_tile_pipes = 2; + rdev->config.evergreen.max_simds = 2; + rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 192; + rdev->config.evergreen.max_gs_threads = 16; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 128; + rdev->config.evergreen.sx_max_export_pos_size = 32; + rdev->config.evergreen.sx_max_export_smx_size = 96; + rdev->config.evergreen.max_hw_contexts = 4; + rdev->config.evergreen.sq_num_cf_insts = 1; + + 
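/* Worked example (editorial): with the CHIP_PALM limits above,
 * max_threads = 192 and ps_thread_count = 96 (chosen further below), so the
 * per-stage split computed later as
 *   NUM_VS_THREADS((((max_threads - ps_thread_count) / 6) / 8) * 8)
 * yields ((192 - 96) / 6 / 8) * 8 = 16 threads for each non-PS stage.
 */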
rdev->config.evergreen.sc_prim_fifo_size = 0x40; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; } /* Initialize HDP */ @@ -1685,6 +1829,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); + evergreen_program_channel_remap(rdev); + num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; grbm_gfx_index = INSTANCE_BROADCAST_WRITES; @@ -1767,9 +1913,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) GS_PRIO(2) | ES_PRIO(3)); - if (rdev->family == CHIP_CEDAR) + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_PALM: /* no vertex cache */ sq_config &= ~VC_ENABLE; + break; + default: + break; + } sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); @@ -1781,10 +1933,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); - if (rdev->family == CHIP_CEDAR) + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_PALM: ps_thread_count = 96; - else + break; + default: ps_thread_count = 128; + break; + } sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); @@ -1815,10 +1972,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | FORCE_EOV_MAX_REZ_CNT(255))); - if (rdev->family == CHIP_CEDAR) + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_PALM: vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); - else + break; + default: vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); + break; + } vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); @@ -1902,12 +2064,18 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* Setup GPU memory space */ - /* size in MB on evergreen */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + if (rdev->flags & RADEON_IS_IGP) { + /* size in bytes on fusion */ + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); + } else { + /* size in MB on evergreen */ + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + } rdev->mc.visible_vram_size = rdev->mc.aper_size; rdev->mc.active_vram_size = rdev->mc.visible_vram_size; - r600_vram_gtt_location(rdev, &rdev->mc); + r700_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); return 0; @@ -2024,17 +2192,21 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) WREG32(GRBM_INT_CNTL, 0); WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + 
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(DACA_AUTODETECT_INT_CONTROL, 0); WREG32(DACB_AUTODETECT_INT_CONTROL, 0); @@ -2060,6 +2232,7 @@ int evergreen_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; + u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -2085,27 +2258,33 @@ int evergreen_irq_set(struct radeon_device *rdev) cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= TIME_STAMP_INT_ENABLE; } - if (rdev->irq.crtc_vblank_int[0]) { + if (rdev->irq.crtc_vblank_int[0] || + rdev->irq.pflip[0]) { DRM_DEBUG("evergreen_irq_set: vblank 0\n"); crtc1 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[1]) { + if (rdev->irq.crtc_vblank_int[1] || + rdev->irq.pflip[1]) { DRM_DEBUG("evergreen_irq_set: vblank 1\n"); crtc2 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[2]) { + if (rdev->irq.crtc_vblank_int[2] || + rdev->irq.pflip[2]) { DRM_DEBUG("evergreen_irq_set: vblank 2\n"); crtc3 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[3]) { + if (rdev->irq.crtc_vblank_int[3] || + rdev->irq.pflip[3]) { DRM_DEBUG("evergreen_irq_set: vblank 3\n"); crtc4 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[4]) { + if (rdev->irq.crtc_vblank_int[4] || + rdev->irq.pflip[4]) { DRM_DEBUG("evergreen_irq_set: vblank 4\n"); crtc5 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[5]) { + if (rdev->irq.crtc_vblank_int[5] || + rdev->irq.pflip[5]) { DRM_DEBUG("evergreen_irq_set: vblank 5\n"); crtc6 |= VBLANK_INT_MASK; } @@ -2143,10 +2322,19 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); - WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); - WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); - WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); - WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); + WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); + } + + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 
grph5); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); @@ -2158,79 +2346,92 @@ int evergreen_irq_set(struct radeon_device *rdev) return 0; } -static inline void evergreen_irq_ack(struct radeon_device *rdev, - u32 *disp_int, - u32 *disp_int_cont, - u32 *disp_int_cont2, - u32 *disp_int_cont3, - u32 *disp_int_cont4, - u32 *disp_int_cont5) +static inline void evergreen_irq_ack(struct radeon_device *rdev) { u32 tmp; - *disp_int = RREG32(DISP_INTERRUPT_STATUS); - *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); - *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); - *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); - *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); - *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); - - if (*disp_int & LB_D1_VBLANK_INTERRUPT) + rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); + rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); + rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); + rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); + rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); + rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); + rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); + + if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int & LB_D1_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) + if 
(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int & DC_HPD1_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { tmp = RREG32(DC_HPD1_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD1_INT_CONTROL, tmp); } - if (*disp_int_cont & DC_HPD2_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { tmp = RREG32(DC_HPD2_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD2_INT_CONTROL, tmp); } - if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { tmp = RREG32(DC_HPD3_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD3_INT_CONTROL, tmp); } - if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { tmp = RREG32(DC_HPD4_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD4_INT_CONTROL, tmp); } - if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD5_INT_CONTROL, tmp); } - if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); @@ -2239,14 +2440,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev, void evergreen_irq_disable(struct radeon_device *rdev) { - u32 disp_int, disp_int_cont, disp_int_cont2; - u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; - r600_disable_interrupts(rdev); /* Wait and acknowledge irq */ mdelay(1); - evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, - &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + evergreen_irq_ack(rdev); evergreen_disable_interrupt_state(rdev); } @@ -2286,8 +2483,6 @@ int evergreen_irq_process(struct radeon_device *rdev) u32 rptr = 
rdev->ih.rptr; u32 src_id, src_data; u32 ring_index; - u32 disp_int, disp_int_cont, disp_int_cont2; - u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; unsigned long flags; bool queue_hotplug = false; @@ -2308,8 +2503,7 @@ int evergreen_irq_process(struct radeon_device *rdev) restart_ih: /* display interrupts */ - evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, - &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + evergreen_irq_ack(rdev); rdev->ih.wptr = wptr; while (rptr != wptr) { @@ -2322,17 +2516,21 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (disp_int & LB_D1_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int &= ~LB_D1_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } break; case 1: /* D1 vline */ - if (disp_int & LB_D1_VLINE_INTERRUPT) { - disp_int &= ~LB_D1_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; DRM_DEBUG("IH: D1 vline\n"); } break; @@ -2344,17 +2542,21 @@ restart_ih: case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } break; case 1: /* D2 vline */ - if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { - disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; DRM_DEBUG("IH: D2 vline\n"); } break; @@ -2366,17 +2568,21 @@ restart_ih: case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ - if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 2); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[2]) { + drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[2]) + radeon_crtc_handle_flip(rdev, 2); + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); } break; case 1: /* D3 vline */ - if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { - disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; DRM_DEBUG("IH: D3 vline\n"); } break; @@ -2388,17 +2594,21 @@ restart_ih: case 4: /* D4 vblank/vline 
*/ switch (src_data) { case 0: /* D4 vblank */ - if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 3); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[3]) { + drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[3]) + radeon_crtc_handle_flip(rdev, 3); + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); } break; case 1: /* D4 vline */ - if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { - disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; DRM_DEBUG("IH: D4 vline\n"); } break; @@ -2410,17 +2620,21 @@ restart_ih: case 5: /* D5 vblank/vline */ switch (src_data) { case 0: /* D5 vblank */ - if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 4); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[4]) { + drm_handle_vblank(rdev->ddev, 4); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[4]) + radeon_crtc_handle_flip(rdev, 4); + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); } break; case 1: /* D5 vline */ - if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { - disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; DRM_DEBUG("IH: D5 vline\n"); } break; @@ -2432,17 +2646,21 @@ restart_ih: case 6: /* D6 vblank/vline */ switch (src_data) { case 0: /* D6 vblank */ - if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 5); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[5]) { + drm_handle_vblank(rdev->ddev, 5); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[5]) + radeon_crtc_handle_flip(rdev, 5); + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); } break; case 1: /* D6 vline */ - if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { - disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; DRM_DEBUG("IH: D6 vline\n"); } break; @@ -2454,43 +2672,43 @@ restart_ih: case 42: /* HPD hotplug */ switch (src_data) { case 0: - if (disp_int & DC_HPD1_INTERRUPT) { - disp_int &= ~DC_HPD1_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD1\n"); } break; case 1: - if (disp_int_cont & DC_HPD2_INTERRUPT) { - disp_int_cont &= ~DC_HPD2_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont &= 
~DC_HPD2_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD2\n"); } break; case 2: - if (disp_int_cont2 & DC_HPD3_INTERRUPT) { - disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD3\n"); } break; case 3: - if (disp_int_cont3 & DC_HPD4_INTERRUPT) { - disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD4\n"); } break; case 4: - if (disp_int_cont4 & DC_HPD5_INTERRUPT) { - disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD5\n"); } break; case 5: - if (disp_int_cont5 & DC_HPD6_INTERRUPT) { - disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD6\n"); } @@ -2666,12 +2884,16 @@ static bool evergreen_card_posted(struct radeon_device *rdev) u32 reg; /* first check CRTCs */ - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (rdev->flags & RADEON_IS_IGP) + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); + else + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); if (reg & EVERGREEN_CRTC_MASTER_EN) return true; diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index e0e590110dd4..2ccd1f0545fe 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -147,7 +147,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) radeon_ring_write(rdev, 0); radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); - if (rdev->family == CHIP_CEDAR) + if ((rdev->family == CHIP_CEDAR) || + (rdev->family == CHIP_PALM)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else @@ -331,9 +332,31 @@ set_default_state(struct radeon_device *rdev) num_hs_stack_entries = 85; num_ls_stack_entries = 85; break; + case CHIP_PALM: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 96; + num_vs_threads = 16; + num_gs_threads = 16; + num_es_threads = 16; + num_hs_threads = 16; + num_ls_threads = 16; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + 
num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; } - if (rdev->family == CHIP_CEDAR) + if ((rdev->family == CHIP_CEDAR) || + (rdev->family == CHIP_PALM)) sq_config = 0; else sq_config = VC_ENABLE; diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 2330f3a36fd5..c781c92c3451 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -105,6 +105,11 @@ #define EVERGREEN_GRPH_Y_START 0x6830 #define EVERGREEN_GRPH_X_END 0x6834 #define EVERGREEN_GRPH_Y_END 0x6838 +#define EVERGREEN_GRPH_UPDATE 0x6844 +# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2) +# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16) +#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848 +# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ #define EVERGREEN_CUR_CONTROL 0x6998 @@ -178,6 +183,7 @@ # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) #define EVERGREEN_CRTC_STATUS 0x6e8c #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 +#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 113c70cc8b39..5b869ce86917 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -164,11 +164,13 @@ #define SE_SC_BUSY (1 << 29) #define SE_DB_BUSY (1 << 30) #define SE_CB_BUSY (1 << 31) - +/* evergreen */ #define CG_MULT_THERMAL_STATUS 0x740 #define ASIC_T(x) ((x) << 16) #define ASIC_T_MASK 0x7FF0000 #define ASIC_T_SHIFT 16 +/* APU */ +#define CG_THERMAL_STATUS 0x678 #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 @@ -180,6 +182,7 @@ #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 +#define MC_SHARED_CHREMAP 0x2008 #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 @@ -199,6 +202,7 @@ #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 #define MC_VM_FB_LOCATION 0x2024 +#define MC_FUS_VM_FB_OFFSET 0x2898 #define MC_VM_MB_L1_TLB0_CNTL 0x2234 #define MC_VM_MB_L1_TLB1_CNTL 0x2238 #define MC_VM_MB_L1_TLB2_CNTL 0x223C @@ -348,6 +352,9 @@ #define SYNC_WALKER (1 << 25) #define SYNC_ALIGNER (1 << 26) +#define TCP_CHAN_STEER_LO 0x960c +#define TCP_CHAN_STEER_HI 0x9610 + #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x) << 0) #define VC_ONLY 0 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 8e10aa9f74b0..300b4a64d8fe 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -68,6 +68,56 @@ MODULE_FIRMWARE(FIRMWARE_R520); * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ +void r100_pre_page_flip(struct radeon_device *rdev, int crtc) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; + u32 tmp; + + /* make sure flip is at vb rather than hb */ + tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); + tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; + /* make sure pending bit is asserted */ + tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; + WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); + + /* set pageflip to happen as late as possible in the vblank interval. 
+	 * same field for crtc1/2
+	 */
+	tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+	tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+	WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+	/* Lock the graphics update lock */
+	/* update the scanout addresses */
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
 void r100_pm_get_dynpm_state(struct radeon_device *rdev)
 {
 	int i;
@@ -526,10 +576,12 @@ int r100_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.gui_idle) {
 		tmp |= RADEON_GUI_IDLE_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		tmp |= RADEON_CRTC_VBLANK_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		tmp |= RADEON_CRTC2_VBLANK_MASK;
 	}
 	if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@ int r100_irq_process(struct radeon_device *rdev)
 	}
 	/* Vertical blank interrupts */
 	if (status & RADEON_CRTC_VBLANK_STAT) {
-		drm_handle_vblank(rdev->ddev, 0);
-		rdev->pm.vblank_sync = true;
-		wake_up(&rdev->irq.vblank_queue);
+		if (rdev->irq.crtc_vblank_int[0]) {
+			drm_handle_vblank(rdev->ddev, 0);
+			rdev->pm.vblank_sync = true;
+			wake_up(&rdev->irq.vblank_queue);
+		}
+		if (rdev->irq.pflip[0])
+			radeon_crtc_handle_flip(rdev, 0);
 	}
 	if (status & RADEON_CRTC2_VBLANK_STAT) {
-		drm_handle_vblank(rdev->ddev, 1);
-		rdev->pm.vblank_sync = true;
-		wake_up(&rdev->irq.vblank_queue);
+		if (rdev->irq.crtc_vblank_int[1]) {
+			drm_handle_vblank(rdev->ddev, 1);
+			rdev->pm.vblank_sync = true;
+			wake_up(&rdev->irq.vblank_queue);
+		}
+		if (rdev->irq.pflip[1])
+			radeon_crtc_handle_flip(rdev, 1);
 	}
 	if (status & RADEON_FP_DETECT_STAT) {
 		queue_hotplug = true;
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f604e29b..fc437059918f 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
 #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
 #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+
 /* master controls */
 #define AVIVO_DC_CRTC_MASTER_EN 0x60f8
 #define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
@@ -409,8 +411,10 @@
 #define AVIVO_D1GRPH_X_END 0x6134
 #define AVIVO_D1GRPH_Y_END 0x6138
 #define AVIVO_D1GRPH_UPDATE 0x6144
+# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
 # define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
 #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
+# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
 #define AVIVO_D1CUR_CONTROL 0x6400
 # define AVIVO_D1CURSOR_EN (1 << 0)
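The r100_page_flip() added above is a lock/program/wait/unlock protocol: the new scanout base is written with RADEON_CRTC_OFFSET__OFFSET_LOCK held so the CRTC keeps scanning the old buffer, the code spins until the hardware reports the latched request via RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET, and only then releases the lock so the flip can complete during the following vblank. A minimal standalone sketch of that pattern follows; mmio_read32()/mmio_write32() and the two bit positions are hypothetical stand-ins for the driver's RREG32()/WREG32() accessors and real register layout.

/* Sketch of the lock -> program -> wait -> unlock flip protocol used by
 * r100_page_flip(). The accessors and bit positions below are illustrative
 * assumptions, not the radeon driver's actual definitions. */
#include <stdint.h>
#include <stdbool.h>

#define SCANOUT_OFFSET_LOCK  (1u << 31) /* assumed: hold flip until released */
#define SCANOUT_FLIP_PENDING (1u << 30) /* assumed: hw latched the request */

extern uint32_t mmio_read32(uint32_t reg);            /* stand-in for RREG32 */
extern void mmio_write32(uint32_t reg, uint32_t val); /* stand-in for WREG32 */

static bool flip_scanout_base(uint32_t offset_reg, uint32_t new_base)
{
	/* Program the new base with the lock bit set: the CRTC keeps
	 * scanning out of the old buffer until the lock is dropped. */
	mmio_write32(offset_reg, new_base | SCANOUT_OFFSET_LOCK);

	/* Busy-wait until the hardware has latched the flip request. */
	while (!(mmio_read32(offset_reg) & SCANOUT_FLIP_PENDING))
		;

	/* Drop the lock; the buffered flip completes in the next vblank. */
	mmio_write32(offset_reg, new_base);

	/* A still-set pending bit means the flip has not happened yet. */
	return mmio_read32(offset_reg) & SCANOUT_FLIP_PENDING;
}

Returning the still-pending status mirrors r100_page_flip(), whose caller can use it to decide whether the new frame is actually being scanned out yet.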
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a322d4f647bd..c6a37e036f11 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -83,6 +83,9 @@
 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -1161,7 +1164,7 @@ static void r600_mc_program(struct radeon_device *rdev)
  * Note: GTT start, end, size should be initialized before calling this
  * function on AGP platform.
  */
-void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
 	u64 size_bf, size_af;
@@ -2000,6 +2003,10 @@ int r600_init_microcode(struct radeon_device *rdev)
 		chip_name = "CYPRESS";
 		rlc_chip_name = "CYPRESS";
 		break;
+	case CHIP_PALM:
+		chip_name = "PALM";
+		rlc_chip_name = "SUMO";
+		break;
 	default: BUG();
 	}
@@ -2865,6 +2872,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(DxMODE_INT_MASK, 0);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
 	if (ASIC_IS_DCE3(rdev)) {
 		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
 		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2989,6 +2998,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi1, hdmi2;
+	u32 d1grph = 0, d2grph = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3025,11 +3035,13 @@ int r600_irq_set(struct radeon_device *rdev)
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("r600_irq_set: vblank 0\n");
 		mode_int |= D1MODE_VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		DRM_DEBUG("r600_irq_set: vblank 1\n");
 		mode_int |= D2MODE_VBLANK_INT_MASK;
 	}
@@ -3072,6 +3084,8 @@ int r600_irq_set(struct radeon_device *rdev)
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
 	if (ASIC_IS_DCE3(rdev)) {
@@ -3094,32 +3108,35 @@ int r600_irq_set(struct radeon_device *rdev)
 	return 0;
 }
 
-static inline void r600_irq_ack(struct radeon_device *rdev,
-				u32 *disp_int,
-				u32 *disp_int_cont,
-				u32 *disp_int_cont2)
+static inline void r600_irq_ack(struct radeon_device *rdev)
 {
 	u32 tmp;
 
 	if (ASIC_IS_DCE3(rdev)) {
-		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
 	} else {
-		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = 0;
-	}
-
-	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+	}
+	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
 		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
 		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D2_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
 		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & DC_HPD1_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD1_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3130,7 +3147,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int & DC_HPD2_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD2_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3141,7 +3158,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD3_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3152,18 +3169,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
 		tmp = RREG32(DC_HPD4_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 	}
 	if (ASIC_IS_DCE32(rdev)) {
-		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD5_INT_CONTROL, tmp);
 		}
-		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3185,12 +3202,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 
 void r600_irq_disable(struct radeon_device *rdev)
 {
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_irq_ack(rdev);
 	r600_disable_interrupt_state(rdev);
 }
 
@@ -3253,7 +3268,7 @@ int r600_irq_process(struct radeon_device *rdev)
 	u32 wptr = r600_get_ih_wptr(rdev);
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
-	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+	u32 ring_index;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
@@ -3274,7 +3289,7 @@ int r600_irq_process(struct radeon_device *rdev)
 restart_ih:
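The r600_irq_ack() rework above replaces the three by-reference status words with a snapshot stored once in rdev->irq.stat_regs.r600; r600_irq_process() then tests and clears bits in that snapshot while it walks the IH ring, instead of re-reading volatile hardware registers. A minimal sketch of this latch-then-consume pattern follows; read_status() and the event bits are assumed placeholders for the RREG32(DISP_INTERRUPT_STATUS*) reads, not the driver's real interface.

/* Latch-then-consume interrupt status handling, modeled on the reworked
 * r600_irq_ack()/r600_irq_process() pair. read_status() and the bit values
 * below are illustrative placeholders. */
#include <stdint.h>

#define EVENT_VBLANK (1u << 0) /* assumed bit assignments */
#define EVENT_HPD    (1u << 1)

struct irq_stat_regs {
	uint32_t disp_int; /* latched copy of the hardware status */
};

extern uint32_t read_status(void); /* stand-in for the status register read */
extern void handle_vblank(void);
extern void handle_hotplug(void);

static void irq_ack(struct irq_stat_regs *stat)
{
	/* One volatile read per interrupt; all later tests use this copy,
	 * so a bit observed here cannot vanish mid-processing. */
	stat->disp_int = read_status();
}

static void irq_process(struct irq_stat_regs *stat)
{
	irq_ack(stat);
	if (stat->disp_int & EVENT_VBLANK) {
		handle_vblank();
		stat->disp_int &= ~EVENT_VBLANK; /* consume from the snapshot */
	}
	if (stat->disp_int & EVENT_HPD) {
		handle_hotplug();
		stat->disp_int &= ~EVENT_HPD;
	}
}

Moving the snapshot into the shared rdev->irq structure is also what lets evergreen_irq_ack() grow to six disp_int_cont* words plus six d*grph_int words without threading a dozen out-parameters through every caller.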
/* display interrupts */ - r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); + r600_irq_ack(rdev); rdev->ih.wptr = wptr; while (rptr != wptr) { @@ -3287,17 +3302,21 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (disp_int & LB_D1_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int &= ~LB_D1_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } break; case 1: /* D1 vline */ - if (disp_int & LB_D1_VLINE_INTERRUPT) { - disp_int &= ~LB_D1_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; DRM_DEBUG("IH: D1 vline\n"); } break; @@ -3309,17 +3328,21 @@ restart_ih: case 5: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (disp_int & LB_D2_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int &= ~LB_D2_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } break; case 1: /* D1 vline */ - if (disp_int & LB_D2_VLINE_INTERRUPT) { - disp_int &= ~LB_D2_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; DRM_DEBUG("IH: D2 vline\n"); } break; @@ -3331,43 +3354,43 @@ restart_ih: case 19: /* HPD/DAC hotplug */ switch (src_data) { case 0: - if (disp_int & DC_HPD1_INTERRUPT) { - disp_int &= ~DC_HPD1_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD1\n"); } break; case 1: - if (disp_int & DC_HPD2_INTERRUPT) { - disp_int &= ~DC_HPD2_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD2\n"); } break; case 4: - if (disp_int_cont & DC_HPD3_INTERRUPT) { - disp_int_cont &= ~DC_HPD3_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD3\n"); } break; case 5: - if (disp_int_cont & DC_HPD4_INTERRUPT) { - disp_int_cont &= ~DC_HPD4_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD4\n"); } break; case 10: - if (disp_int_cont2 & DC_HPD5_INTERRUPT) { - disp_int_cont2 &= ~DC_HPD5_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD5\n"); } break; case 12: - if (disp_int_cont2 & DC_HPD6_INTERRUPT) { - 
disp_int_cont2 &= ~DC_HPD6_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD6\n"); } diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index bff4dc4f410f..c89cfa8e0c05 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -728,6 +728,15 @@ /* DCE 3.2 */ # define DC_HPDx_EN (1 << 28) +#define D1GRPH_INTERRUPT_STATUS 0x6158 +#define D2GRPH_INTERRUPT_STATUS 0x6958 +# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0) +# define DxGRPH_PFLIP_INT_CLEAR (1 << 8) +#define D1GRPH_INTERRUPT_CONTROL 0x615c +#define D2GRPH_INTERRUPT_CONTROL 0x695c +# define DxGRPH_PFLIP_INT_MASK (1 << 0) +# define DxGRPH_PFLIP_INT_TYPE (1 << 8) + /* * PM4 */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3a7095743d44..431d4186ddf0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -69,6 +69,7 @@ #include <ttm/ttm_bo_driver.h> #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> +#include <ttm/ttm_execbuf_util.h> #include "radeon_family.h" #include "radeon_mode.h" @@ -180,6 +181,7 @@ void rs690_pm_info(struct radeon_device *rdev); extern u32 rv6xx_get_temp(struct radeon_device *rdev); extern u32 rv770_get_temp(struct radeon_device *rdev); extern u32 evergreen_get_temp(struct radeon_device *rdev); +extern u32 sumo_get_temp(struct radeon_device *rdev); /* * Fences. @@ -259,13 +261,12 @@ struct radeon_bo { }; struct radeon_bo_list { - struct list_head list; + struct ttm_validate_buffer tv; struct radeon_bo *bo; uint64_t gpu_offset; unsigned rdomain; unsigned wdomain; u32 tiling_flags; - bool reserved; }; /* @@ -377,11 +378,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); /* * IRQS. */ + +struct radeon_unpin_work { + struct work_struct work; + struct radeon_device *rdev; + int crtc_id; + struct radeon_fence *fence; + struct drm_pending_vblank_event *event; + struct radeon_bo *old_rbo; + u64 new_crtc_base; +}; + +struct r500_irq_stat_regs { + u32 disp_int; +}; + +struct r600_irq_stat_regs { + u32 disp_int; + u32 disp_int_cont; + u32 disp_int_cont2; + u32 d1grph_int; + u32 d2grph_int; +}; + +struct evergreen_irq_stat_regs { + u32 disp_int; + u32 disp_int_cont; + u32 disp_int_cont2; + u32 disp_int_cont3; + u32 disp_int_cont4; + u32 disp_int_cont5; + u32 d1grph_int; + u32 d2grph_int; + u32 d3grph_int; + u32 d4grph_int; + u32 d5grph_int; + u32 d6grph_int; +}; + +union radeon_irq_stat_regs { + struct r500_irq_stat_regs r500; + struct r600_irq_stat_regs r600; + struct evergreen_irq_stat_regs evergreen; +}; + struct radeon_irq { bool installed; bool sw_int; /* FIXME: use a define max crtc rather than hardcode it */ bool crtc_vblank_int[6]; + bool pflip[6]; wait_queue_head_t vblank_queue; /* FIXME: use defines for max hpd/dacs */ bool hpd[6]; @@ -392,12 +438,17 @@ struct radeon_irq { bool hdmi[2]; spinlock_t sw_lock; int sw_refcount; + union radeon_irq_stat_regs stat_regs; + spinlock_t pflip_lock[6]; + int pflip_refcount[6]; }; int radeon_irq_kms_init(struct radeon_device *rdev); void radeon_irq_kms_fini(struct radeon_device *rdev); void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); +void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); +void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); /* * CP & ring. 
@@ -687,6 +738,7 @@ enum radeon_int_thermal_type { THERMAL_TYPE_RV6XX, THERMAL_TYPE_RV770, THERMAL_TYPE_EVERGREEN, + THERMAL_TYPE_SUMO, }; struct radeon_voltage { @@ -881,6 +933,10 @@ struct radeon_asic { void (*pm_finish)(struct radeon_device *rdev); void (*pm_init_profile)(struct radeon_device *rdev); void (*pm_get_dynpm_state)(struct radeon_device *rdev); + /* pageflipping */ + void (*pre_page_flip)(struct radeon_device *rdev, int crtc); + u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); + void (*post_page_flip)(struct radeon_device *rdev, int crtc); }; /* @@ -1269,6 +1325,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) +#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM)) /* * BIOS helpers. @@ -1344,6 +1401,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) +#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) +#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) +#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) /* Common functions */ /* AGP */ @@ -1432,7 +1492,6 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode2); /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ -extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern int r600_cp_start(struct radeon_device *rdev); @@ -1478,6 +1537,7 @@ extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mo extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); +extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern void r700_cp_stop(struct radeon_device *rdev); extern void r700_cp_fini(struct radeon_device *rdev); extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 64fb89ecbf74..3d73fe484f42 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic r200_asic = { @@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic r300_asic = { @@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = 
&r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic r300_asic_pcie = { @@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic r420_asic = { @@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic rs400_asic = { @@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic rs600_asic = { @@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rs690_asic = { @@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rv515_asic = { @@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic r520_asic = { @@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic r600_asic = { @@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r600_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rs780_asic = { @@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &rs780_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rv770_asic = { @@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r600_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rv770_page_flip, + .post_page_flip = 
&rs600_post_page_flip, }; static struct radeon_asic evergreen_asic = { @@ -749,6 +788,52 @@ static struct radeon_asic evergreen_asic = { .pm_finish = &evergreen_pm_finish, .pm_init_profile = &r600_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &evergreen_pre_page_flip, + .page_flip = &evergreen_page_flip, + .post_page_flip = &evergreen_post_page_flip, +}; + +static struct radeon_asic sumo_asic = { + .init = &evergreen_init, + .fini = &evergreen_fini, + .suspend = &evergreen_suspend, + .resume = &evergreen_resume, + .cp_commit = &r600_cp_commit, + .gpu_is_lockup = &evergreen_gpu_is_lockup, + .asic_reset = &evergreen_asic_reset, + .vga_set_state = &r600_vga_set_state, + .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, + .ring_ib_execute = &r600_ring_ib_execute, + .irq_set = &evergreen_irq_set, + .irq_process = &evergreen_irq_process, + .get_vblank_counter = &evergreen_get_vblank_counter, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &evergreen_cs_parse, + .copy_blit = &evergreen_copy_blit, + .copy_dma = &evergreen_copy_blit, + .copy = &evergreen_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = NULL, + .set_memory_clock = NULL, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &evergreen_bandwidth_update, + .hpd_init = &evergreen_hpd_init, + .hpd_fini = &evergreen_hpd_fini, + .hpd_sense = &evergreen_hpd_sense, + .hpd_set_polarity = &evergreen_hpd_set_polarity, + .gui_idle = &r600_gui_idle, + .pm_misc = &evergreen_pm_misc, + .pm_prepare = &evergreen_pm_prepare, + .pm_finish = &evergreen_pm_finish, + .pm_init_profile = &rs780_pm_init_profile, + .pm_get_dynpm_state = &r600_pm_get_dynpm_state, }; int radeon_asic_init(struct radeon_device *rdev) @@ -835,6 +920,9 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_HEMLOCK: rdev->asic = &evergreen_asic; break; + case CHIP_PALM: + rdev->asic = &sumo_asic; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -849,7 +937,9 @@ int radeon_asic_init(struct radeon_device *rdev) if (rdev->flags & RADEON_SINGLE_CRTC) rdev->num_crtc = 1; else { - if (ASIC_IS_DCE4(rdev)) + if (ASIC_IS_DCE41(rdev)) + rdev->num_crtc = 2; + else if (ASIC_IS_DCE4(rdev)) rdev->num_crtc = 6; else rdev->num_crtc = 2; diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 740988244143..4970eda1bd41 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -130,6 +130,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev); extern void r100_pm_finish(struct radeon_device *rdev); extern void r100_pm_init_profile(struct radeon_device *rdev); extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); +extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); +extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); /* * r200,rv250,rs300,rv280 @@ -205,6 +208,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev, extern void rs600_pm_misc(struct radeon_device *rdev); extern void rs600_pm_prepare(struct radeon_device *rdev); extern void rs600_pm_finish(struct radeon_device *rdev); +extern void rs600_pre_page_flip(struct 
radeon_device *rdev, int crtc); +extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); /* * rs690,rs740 @@ -287,6 +293,7 @@ void rv770_fini(struct radeon_device *rdev); int rv770_suspend(struct radeon_device *rdev); int rv770_resume(struct radeon_device *rdev); extern void rv770_pm_misc(struct radeon_device *rdev); +extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); /* * evergreen @@ -314,5 +321,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); extern void evergreen_pm_misc(struct radeon_device *rdev); extern void evergreen_pm_prepare(struct radeon_device *rdev); extern void evergreen_pm_finish(struct radeon_device *rdev); +extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); +extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index bc5a2c3382d9..8e82f672263f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1337,6 +1337,43 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, return false; } +static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + u16 data_offset, size; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; + u8 frev, crev; + u16 percentage = 0, rate = 0; + + /* get any igp specific overrides */ + if (atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) + (mode_info->atom_context->bios + data_offset); + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->usDVISSPercentage); + rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->usHDMISSPercentage); + rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); + break; + } + if (percentage) + ss->percentage = percentage; + if (rate) + ss->rate = rate; + } +} + union asic_ss_info { struct _ATOM_ASIC_INTERNAL_SS_INFO info; struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; @@ -1401,6 +1438,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); + if (rdev->flags & RADEON_IS_IGP) + radeon_atombios_get_igp_ss_overrides(rdev, ss, id); return true; } } @@ -1740,495 +1779,600 @@ static const char *pp_lib_thermal_controller_names[] = { "RV6xx", "RV770", "adt7473", + "NONE", "External GPIO", "Evergreen", - "adt7473 with internal", - + "emc2103", + "Sumo", }; union power_info { struct _ATOM_POWERPLAY_INFO info; struct _ATOM_POWERPLAY_INFO_V2 info_2; struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE info_4; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + 
struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; }; -void radeon_atombios_get_power_modes(struct radeon_device *rdev) +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev, + int state_index, + u32 misc, u32 misc2) +{ + rdev->pm.power_state[state_index].misc = misc; + rdev->pm.power_state[state_index].misc2 = misc2; + /* order matters! */ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_POWERSAVE; + if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + } + if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + } else if (state_index == 0) { + rdev->pm.power_state[state_index].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; + } +} + +static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; + u32 misc, misc2 = 0; + int num_modes = 0, i; + int state_index = 0; + struct radeon_i2c_bus_rec i2c_bus; + union power_info *power_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; + u16 data_offset; u8 frev, crev; - u32 misc, misc2 = 0, sclk, mclk; - union power_info *power_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - struct _ATOM_PPLIB_STATE *power_state; - int num_modes = 0, i, j; - int state_index = 0, mode_index = 0; - struct radeon_i2c_bus_rec i2c_bus; - - rdev->pm.default_power_state_index = -1; - if (atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - if (frev < 4) { - /* add the i2c bus for thermal/fan chip */ - if (power_info->info.ucOverdriveThermalController > 0) { - DRM_INFO("Possible %s thermal controller at 0x%02x\n", - thermal_controller_names[power_info->info.ucOverdriveThermalController], - power_info->info.ucOverdriveControllerAddress >> 1); - i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); - rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); - if (rdev->pm.i2c_bus) { - struct i2c_board_info info = { }; - const char *name = thermal_controller_names[power_info->info. 
- ucOverdriveThermalController]; - info.addr = power_info->info.ucOverdriveControllerAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); - } + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return state_index; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + /* add the i2c bus for thermal/fan chip */ + if (power_info->info.ucOverdriveThermalController > 0) { + DRM_INFO("Possible %s thermal controller at 0x%02x\n", + thermal_controller_names[power_info->info.ucOverdriveThermalController], + power_info->info.ucOverdriveControllerAddress >> 1); + i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); + rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); + if (rdev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = thermal_controller_names[power_info->info. + ucOverdriveThermalController]; + info.addr = power_info->info.ucOverdriveControllerAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + } + } + num_modes = power_info->info.ucNumOfPowerModeEntries; + if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) + num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + /* last mode is usually default, array is low to high */ + for (i = 0; i < num_modes; i++) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + switch (frev) { + case 1: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + rdev->pm.power_state[state_index].pcie_lanes = + power_info->info.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; } - num_modes = power_info->info.ucNumOfPowerModeEntries; - if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) - num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; - /* last mode is usually default, array is low to high */ - for (i = 0; i < num_modes; i++) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - switch (frev) { - case 1: - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = - le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); - 
rdev->pm.power_state[state_index].clock_info[0].sclk = - le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; - rdev->pm.power_state[state_index].pcie_lanes = - power_info->info.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); - if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || - (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = - radeon_lookup_gpio(rdev, - power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - true; - else - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - false; - } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_VDDC; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; - } - rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = misc; - /* order matters! */ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_POWERSAVE; - if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } else if (state_index == 0) { - rdev->pm.power_state[state_index].clock_info[0].flags |= - RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; - case 2: - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = - le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); - rdev->pm.power_state[state_index].clock_info[0].sclk = - le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; - rdev->pm.power_state[state_index].pcie_lanes = - power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); - misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); - if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || - (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - 
rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = - radeon_lookup_gpio(rdev, - power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - true; - else - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - false; - } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_VDDC; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; - } - rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = misc; - rdev->pm.power_state[state_index].misc2 = misc2; - /* order matters! */ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_POWERSAVE; - if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } - if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } else if (state_index == 0) { - rdev->pm.power_state[state_index].clock_info[0].flags |= - RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; - case 3: - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = - le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); - rdev->pm.power_state[state_index].clock_info[0].sclk = - le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; - rdev->pm.power_state[state_index].pcie_lanes = - power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); - misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); - if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || - (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = - radeon_lookup_gpio(rdev, - power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); - if (misc & 
ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - true; - else - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - false; - } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_VDDC; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; - if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { - rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = - true; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = - power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; - } - } - rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = misc; - rdev->pm.power_state[state_index].misc2 = misc2; - /* order matters! */ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_POWERSAVE; - if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } - if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - } else if (state_index == 0) { - rdev->pm.power_state[state_index].clock_info[0].flags |= - RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; - } + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0); + state_index++; + break; + case 2: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + rdev->pm.power_state[state_index].pcie_lanes = + power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); + misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & 
ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; } - /* last mode is usually default */ - if (rdev->pm.default_power_state_index == -1) { - rdev->pm.power_state[state_index - 1].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index - 1; - rdev->pm.power_state[state_index - 1].default_clock_mode = - &rdev->pm.power_state[state_index - 1].clock_info[0]; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = 0; - rdev->pm.power_state[state_index].misc2 = 0; + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); + state_index++; + break; + case 3: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + rdev->pm.power_state[state_index].pcie_lanes = + power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); + misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; + if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { + rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = + true; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = + power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; + } } + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); + state_index++; + break; + } + } + /* last mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[state_index - 1].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = state_index - 1; + rdev->pm.power_state[state_index - 1].default_clock_mode = + &rdev->pm.power_state[state_index - 1].clock_info[0]; + 
rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + rdev->pm.power_state[state_index].misc = 0; + rdev->pm.power_state[state_index].misc2 = 0; + } + return state_index; +} + +static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev, + ATOM_PPLIB_THERMALCONTROLLER *controller) +{ + struct radeon_i2c_bus_rec i2c_bus; + + /* add the i2c bus for thermal/fan chip */ + if (controller->ucType > 0) { + if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; + } else if ((controller->ucType == + ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || + (controller->ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || + (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { + DRM_INFO("Special thermal controller config\n"); } else { - int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - uint8_t fw_frev, fw_crev; - uint16_t fw_data_offset, vddc = 0; - union firmware_info *firmware_info; - ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; - - if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, - &fw_frev, &fw_crev, &fw_data_offset)) { - firmware_info = - (union firmware_info *)(mode_info->atom_context->bios + - fw_data_offset); - vddc = firmware_info->info_14.usBootUpVDDCVoltage; + DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + pp_lib_thermal_controller_names[controller->ucType], + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); + rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); + if (rdev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = pp_lib_thermal_controller_names[controller->ucType]; + info.addr = controller->ucI2cAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } + } + } +} - /* add the i2c bus for thermal/fan chip */ - if (controller->ucType > 0) { - if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); - rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; - } else if ((controller->ucType == - ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || - (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { - DRM_INFO("Special thermal controller config\n"); - } else { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", - pp_lib_thermal_controller_names[controller->ucType], - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); - rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); - if (rdev->pm.i2c_bus) { - struct i2c_board_info info = { }; - const char *name = pp_lib_thermal_controller_names[controller->ucType]; - info.addr = controller->ucI2cAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); - } +static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + u8 frev, crev; + u16 data_offset; + union firmware_info *firmware_info; + u16 vddc = 0; - } - } - /* first mode is usually default, followed by low to high */ - for (i = 0; i < power_info->info_4.ucNumStates; i++) { - mode_index = 0; - power_state = (struct _ATOM_PPLIB_STATE *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usStateArrayOffset) + - i * power_info->info_4.ucStateEntrySize); - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) + - (power_state->ucNonClockStateIndex * - power_info->info_4.ucNonClockSize)); - for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) { - if (rdev->flags & RADEON_IS_IGP) { - struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info = - (struct _ATOM_PPLIB_RS780_CLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + - (power_state->ucClockStateIndices[j] * - power_info->info_4.ucClockInfoSize)); - sclk = le16_to_cpu(clock_info->usLowEngineClockLow); - sclk |= clock_info->ucLowEngineClockHigh << 16; - rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; - /* skip invalid modes */ - if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) - continue; - /* voltage works differently on IGPs */ - mode_index++; - } else if (ASIC_IS_DCE4(rdev)) { - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = - (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + - (power_state->ucClockStateIndices[j] * - power_info->info_4.ucClockInfoSize)); - sclk = le16_to_cpu(clock_info->usEngineClockLow); - sclk |= clock_info->ucEngineClockHigh << 16; - mclk = le16_to_cpu(clock_info->usMemoryClockLow); - mclk |= clock_info->ucMemoryClockHigh << 16; - rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; - rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; - /* skip invalid modes */ - if 
((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) - continue; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = - VOLTAGE_SW; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->usVDDC; - /* XXX usVDDCI */ - mode_index++; - } else { - struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = - (struct _ATOM_PPLIB_R600_CLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + - (power_state->ucClockStateIndices[j] * - power_info->info_4.ucClockInfoSize)); - sclk = le16_to_cpu(clock_info->usEngineClockLow); - sclk |= clock_info->ucEngineClockHigh << 16; - mclk = le16_to_cpu(clock_info->usMemoryClockLow); - mclk |= clock_info->ucMemoryClockHigh << 16; - rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; - rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) - continue; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = - VOLTAGE_SW; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->usVDDC; - mode_index++; - } - } - rdev->pm.power_state[state_index].num_clock_modes = mode_index; - if (mode_index) { - misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); - misc2 = le16_to_cpu(non_clock_info->usClassification); - rdev->pm.power_state[state_index].misc = misc; - rdev->pm.power_state[state_index].misc2 = misc2; - rdev->pm.power_state[state_index].pcie_lanes = - ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> - ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; - switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { - case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_NONE: - if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - break; - } - rdev->pm.power_state[state_index].flags = 0; - if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - rdev->pm.power_state[state_index].flags |= - RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; - /* patch the table values with the default slck/mclk from firmware info */ - for (j = 0; j < mode_index; j++) { - rdev->pm.power_state[state_index].clock_info[j].mclk = - rdev->clock.default_mclk; - rdev->pm.power_state[state_index].clock_info[j].sclk = - rdev->clock.default_sclk; - if (vddc) - rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = - vddc; - } - } - state_index++; - } - } - /* if multiple clock modes, mark the lowest as no display */ - for (i = 0; i < state_index; i++) { - if (rdev->pm.power_state[i].num_clock_modes > 1) - rdev->pm.power_state[i].clock_info[0].flags |= - 
RADEON_PM_MODE_NO_DISPLAY;
- }
- /* first mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
- rdev->pm.power_state[0].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = 0;
- rdev->pm.power_state[0].default_clock_mode =
- &rdev->pm.power_state[0].clock_info[0];
- }
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+ }
+
+ return vddc;
+}
+
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+ int j;
+ u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+ u16 vddc = radeon_atombios_get_default_vddc(rdev);
+
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
+ }
+ }
+}
+
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ union pplib_clock_info *clock_info)
+{
+ u32 sclk, mclk;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family >= CHIP_PALM) {
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ } else {
+ sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+ sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ }
+ } else if (ASIC_IS_DCE4(rdev)) {
+ sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+ sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+ mclk |=
clock_info->evergreen.ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->evergreen.usVDDC; + } else { + sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); + sclk |= clock_info->r600.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow); + mclk |= clock_info->r600.ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->r600.usVDDC; + } + + if (rdev->flags & RADEON_IS_IGP) { + /* skip invalid modes */ + if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) + return false; + } else { + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) + return false; + } + return true; +} + +static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j; + int state_index = 0, mode_index = 0; + union pplib_clock_info *clock_info; + bool valid; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return state_index; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + /* first mode is usually default, followed by low to high */ + for (i = 0; i < power_info->pplib.ucNumStates; i++) { + mode_index = 0; + power_state = (union pplib_power_state *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset) + + i * power_info->pplib.ucStateEntrySize); + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + + (power_state->v1.ucNonClockStateIndex * + power_info->pplib.ucNonClockSize)); + for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { + clock_info = (union pplib_clock_info *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + + (power_state->v1.ucClockStateIndices[j] * + power_info->pplib.ucClockInfoSize)); + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + rdev->pm.power_state[state_index].num_clock_modes = mode_index; + if (mode_index) { + radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, + non_clock_info); + state_index++; + } + } + /* if multiple clock modes, mark the lowest as no display */ + for (i = 0; i < state_index; i++) { + if (rdev->pm.power_state[i].num_clock_modes > 1) + rdev->pm.power_state[i].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; + } + /* first 
mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[0].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = 0; + rdev->pm.power_state[0].default_clock_mode = + &rdev->pm.power_state[0].clock_info[0]; + } + return state_index; +} + +static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, non_clock_array_index, clock_array_index; + int state_index = 0, mode_index = 0; + union pplib_clock_info *clock_info; + struct StateArray *state_array; + struct ClockInfoArray *clock_info_array; + struct NonClockInfoArray *non_clock_info_array; + bool valid; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return state_index; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + state_array = (struct StateArray *) + (mode_info->atom_context->bios + data_offset + + power_info->pplib.usStateArrayOffset); + clock_info_array = (struct ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + power_info->pplib.usClockInfoArrayOffset); + non_clock_info_array = (struct NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + power_info->pplib.usNonClockInfoArrayOffset); + for (i = 0; i < state_array->ucNumEntries; i++) { + mode_index = 0; + power_state = (union pplib_power_state *)&state_array->states[i]; + /* XXX this might be an inagua bug... */ + non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = power_state->v2.clockInfoIndex[j]; + /* XXX this might be an inagua bug... 
*/ + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index]; + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + rdev->pm.power_state[state_index].num_clock_modes = mode_index; + if (mode_index) { + radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, + non_clock_info); + state_index++; + } + } + /* if multiple clock modes, mark the lowest as no display */ + for (i = 0; i < state_index; i++) { + if (rdev->pm.power_state[i].num_clock_modes > 1) + rdev->pm.power_state[i].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; + } + /* first mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[0].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = 0; + rdev->pm.power_state[0].default_clock_mode = + &rdev->pm.power_state[0].clock_info[0]; + } + return state_index; +} + +void radeon_atombios_get_power_modes(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + int state_index = 0; + + rdev->pm.default_power_state_index = -1; + + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + switch (frev) { + case 1: + case 2: + case 3: + state_index = radeon_atombios_parse_power_table_1_3(rdev); + break; + case 4: + case 5: + state_index = radeon_atombios_parse_power_table_4_5(rdev); + break; + case 6: + state_index = radeon_atombios_parse_power_table_6(rdev); + break; + default: + break; } } else { /* add the default mode */ diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 6d64a2705f12..35b5eb8fbe2a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs_ptr[i] = &p->relocs[i]; p->relocs[i].robj = p->relocs[i].gobj->driver_private; p->relocs[i].lobj.bo = p->relocs[i].robj; - p->relocs[i].lobj.rdomain = r->read_domains; p->relocs[i].lobj.wdomain = r->write_domain; + p->relocs[i].lobj.rdomain = r->read_domains; + p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].handle = r->handle; p->relocs[i].flags = r->flags; - INIT_LIST_HEAD(&p->relocs[i].lobj.list); radeon_bo_list_add_object(&p->relocs[i].lobj, - &p->validated); + &p->validated); } } return radeon_bo_list_validate(&p->validated); @@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (!error && parser->ib) { - radeon_bo_list_fence(&parser->validated, parser->ib->fence); - } - radeon_bo_list_unreserve(&parser->validated); + + if (!error && parser->ib) + ttm_eu_fence_buffer_objects(&parser->validated, + parser->ib->fence); + else + ttm_eu_backoff_reservation(&parser->validated); + if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { if (parser->relocs[i].gobj) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e12e79326cb1..86660cb425ab 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -81,6 +81,7 @@ static const char radeon_family_name[][16] = { "JUNIPER", "CYPRESS", "HEMLOCK", + "PALM", "LAST", }; @@ -335,7 +336,12 @@ bool radeon_card_posted(struct 
radeon_device *rdev) uint32_t reg; /* first check CRTCs */ - if (ASIC_IS_DCE4(rdev)) { + if (ASIC_IS_DCE41(rdev)) { + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); + if (reg & EVERGREEN_CRTC_MASTER_EN) + return true; + } else if (ASIC_IS_DCE4(rdev)) { reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1df4dc6c063c..7b17e639ab32 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -183,12 +183,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc) kfree(radeon_crtc); } +/* + * Handle unpin events outside the interrupt handler proper. + */ +static void radeon_unpin_work_func(struct work_struct *__work) +{ + struct radeon_unpin_work *work = + container_of(__work, struct radeon_unpin_work, work); + int r; + + /* unpin of the old buffer */ + r = radeon_bo_reserve(work->old_rbo, false); + if (likely(r == 0)) { + r = radeon_bo_unpin(work->old_rbo); + if (unlikely(r != 0)) { + DRM_ERROR("failed to unpin buffer after flip\n"); + } + radeon_bo_unreserve(work->old_rbo); + } else + DRM_ERROR("failed to reserve buffer after flip\n"); + kfree(work); +} + +void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + struct radeon_unpin_work *work; + struct drm_pending_vblank_event *e; + struct timeval now; + unsigned long flags; + u32 update_pending; + int vpos, hpos; + + spin_lock_irqsave(&rdev->ddev->event_lock, flags); + work = radeon_crtc->unpin_work; + if (work == NULL || + !radeon_fence_signaled(work->fence)) { + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + return; + } + /* New pageflip, or just completion of a previous one? */ + if (!radeon_crtc->deferred_flip_completion) { + /* do the flip (mmio) */ + update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); + } else { + /* This is just a completion of a flip queued in crtc + * at last invocation. Make sure we go directly to + * completion routine. + */ + update_pending = 0; + radeon_crtc->deferred_flip_completion = 0; + } + + /* Has the pageflip already completed in crtc, or is it certain + * to complete in this vblank? + */ + if (update_pending && + (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, + &vpos, &hpos)) && + (vpos >=0) && + (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) { + /* crtc didn't flip in this target vblank interval, + * but flip is pending in crtc. It will complete it + * in next vblank interval, so complete the flip at + * next vblank irq. + */ + radeon_crtc->deferred_flip_completion = 1; + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + return; + } + + /* Pageflip (will be) certainly completed in this vblank. Clean up. 
*/ + radeon_crtc->unpin_work = NULL; + + /* wakeup userspace */ + if (work->event) { + e = work->event; + e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + + drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); + radeon_fence_unref(&work->fence); + radeon_post_page_flip(work->rdev, work->crtc_id); + schedule_work(&work->work); +} + +static int radeon_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_framebuffer *old_radeon_fb; + struct radeon_framebuffer *new_radeon_fb; + struct drm_gem_object *obj; + struct radeon_bo *rbo; + struct radeon_fence *fence; + struct radeon_unpin_work *work; + unsigned long flags; + u32 tiling_flags, pitch_pixels; + u64 base; + int r; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) + return -ENOMEM; + + r = radeon_fence_create(rdev, &fence); + if (unlikely(r != 0)) { + kfree(work); + DRM_ERROR("flip queue: failed to create fence.\n"); + return -ENOMEM; + } + work->event = event; + work->rdev = rdev; + work->crtc_id = radeon_crtc->crtc_id; + work->fence = radeon_fence_ref(fence); + old_radeon_fb = to_radeon_framebuffer(crtc->fb); + new_radeon_fb = to_radeon_framebuffer(fb); + /* schedule unpin of the old buffer */ + obj = old_radeon_fb->obj; + rbo = obj->driver_private; + work->old_rbo = rbo; + INIT_WORK(&work->work, radeon_unpin_work_func); + + /* We borrow the event spin lock for protecting unpin_work */ + spin_lock_irqsave(&dev->event_lock, flags); + if (radeon_crtc->unpin_work) { + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(work); + radeon_fence_unref(&fence); + + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + return -EBUSY; + } + radeon_crtc->unpin_work = work; + radeon_crtc->deferred_flip_completion = 0; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* pin the new buffer */ + obj = new_radeon_fb->obj; + rbo = obj->driver_private; + + DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", + work->old_rbo, rbo); + + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) { + DRM_ERROR("failed to reserve new rbo buffer before flip\n"); + goto pflip_cleanup; + } + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + r = -EINVAL; + DRM_ERROR("failed to pin new rbo buffer before flip\n"); + goto pflip_cleanup; + } + radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); + radeon_bo_unreserve(rbo); + + if (!ASIC_IS_AVIVO(rdev)) { + /* crtc offset is from display base addr not FB location */ + base -= radeon_crtc->legacy_display_base_addr; + pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8); + + if (tiling_flags & RADEON_TILING_MACRO) { + if (ASIC_IS_R300(rdev)) { + base &= ~0x7ff; + } else { + int byteshift = fb->bits_per_pixel >> 4; + int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; + base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); + } + } else { + int offset = crtc->y * pitch_pixels + crtc->x; + switch (fb->bits_per_pixel) { + case 8: + default: + offset *= 1; + break; + case 15: + case 16: + 
offset *= 2;
+ break;
+ case 24:
+ offset *= 3;
+ break;
+ case 32:
+ offset *= 4;
+ break;
+ }
+ base += offset;
+ }
+ base &= ~7;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work->new_crtc_base = base;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* update crtc fb */
+ crtc->fb = fb;
+
+ r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+ if (r) {
+ DRM_ERROR("failed to get vblank before flip\n");
+ goto pflip_cleanup1;
+ }
+
+ /* 32 ought to cover us */
+ r = radeon_ring_lock(rdev, 32);
+ if (r) {
+ DRM_ERROR("failed to lock the ring before flip\n");
+ goto pflip_cleanup2;
+ }
+
+ /* emit the fence */
+ radeon_fence_emit(rdev, fence);
+ /* set the proper interrupt */
+ radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+ /* fire the ring */
+ radeon_ring_unlock_commit(rdev);
+
+ return 0;
+
+pflip_cleanup2:
+ drm_vblank_put(dev, radeon_crtc->crtc_id);
+
+pflip_cleanup1:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ r = radeon_bo_unpin(rbo);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to unpin new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ radeon_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ radeon_fence_unref(&fence);
+ kfree(work);
+
+ return r;
+}
+
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
 .cursor_set = radeon_crtc_cursor_set,
 .cursor_move = radeon_crtc_cursor_move,
 .gamma_set = radeon_crtc_gamma_set,
 .set_config = drm_crtc_helper_set_config,
 .destroy = radeon_crtc_destroy,
+ .page_flip = radeon_crtc_page_flip,
 };
 static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -225,7 +485,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 radeon_legacy_init_crtc(dev, radeon_crtc);
 }
-static const char *encoder_names[34] = {
+static const char *encoder_names[36] = {
 "NONE",
 "INTERNAL_LVDS",
 "INTERNAL_TMDS1",
@@ -260,6 +520,8 @@ static const char *encoder_names[34] = {
 "INTERNAL_KLDSCP_LVTMA",
 "INTERNAL_UNIPHY1",
 "INTERNAL_UNIPHY2",
+ "NUTMEG",
+ "TRAVIS",
 };
 static const char *connector_names[15] = {
@@ -1019,7 +1281,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 /*
 * Retrieve current video scanout position of crtc on a given gpu.
 *
- * \param rdev Device to query.
+ * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
@@ -1031,72 +1293,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 *
 * \return Flags, or'ed together as follows:
 *
- * RADEON_SCANOUTPOS_VALID = Query successfull.
- * RADEON_SCANOUTPOS_INVBL = Inside vblank.
- * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
* */ -int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos) +int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos) { u32 stat_crtc = 0, vbl = 0, position = 0; int vbl_start, vbl_end, vtotal, ret = 0; bool in_vbl = true; + struct radeon_device *rdev = dev->dev_private; + if (ASIC_IS_DCE4(rdev)) { if (crtc == 0) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC0_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC1_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 2) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC2_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 3) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC3_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 4) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC4_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 5) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC5_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } } else if (ASIC_IS_AVIVO(rdev)) { if (crtc == 0) { vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } } else { /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ @@ -1112,7 +1376,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, if (!(stat_crtc & 1)) in_vbl = false; - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & @@ -1122,7 +1386,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, if (!(stat_crtc & 1)) in_vbl = false; - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } } @@ -1133,13 +1397,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, /* Valid vblank area boundaries from gpu retrieved? */ if (vbl > 0) { /* Yes: Decode. */ - ret |= RADEON_SCANOUTPOS_ACCURATE; + ret |= DRM_SCANOUTPOS_ACCURATE; vbl_start = vbl & 0x1fff; vbl_end = (vbl >> 16) & 0x1fff; } else { /* No: Fake something reasonable which gives at least ok results. */ - vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay; + vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; vbl_end = 0; } @@ -1155,7 +1419,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, /* Inside "upper part" of vblank area? 
Apply corrective offset if so: */ if (in_vbl && (*vpos >= vbl_start)) { - vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal; + vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; *vpos = *vpos - vtotal; } @@ -1164,7 +1428,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, /* In vblank? */ if (in_vbl) - ret |= RADEON_SCANOUTPOS_INVBL; + ret |= DRM_SCANOUTPOS_INVBL; return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 88e4ea925900..a92d2a5cea90 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -48,9 +48,10 @@ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs + * 2.8.0 - pageflip support */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 7 +#define KMS_DRIVER_MINOR 8 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev); u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); +int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags); void radeon_driver_irq_preinstall_kms(struct drm_device *dev); int radeon_driver_irq_postinstall_kms(struct drm_device *dev); void radeon_driver_irq_uninstall_kms(struct drm_device *dev); @@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, struct drm_file *file_priv); int radeon_gem_object_init(struct drm_gem_object *obj); void radeon_gem_object_free(struct drm_gem_object *obj); +extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, + int *vpos, int *hpos); extern struct drm_ioctl_desc radeon_ioctls_kms[]; extern int radeon_max_kms_ioctl; int radeon_mmap(struct file *filp, struct vm_area_struct *vma); @@ -277,6 +284,8 @@ static struct drm_driver kms_driver = { .get_vblank_counter = radeon_get_vblank_counter_kms, .enable_vblank = radeon_enable_vblank_kms, .disable_vblank = radeon_disable_vblank_kms, + .get_vblank_timestamp = radeon_get_vblank_timestamp_kms, + .get_scanout_position = radeon_get_crtc_scanoutpos, #if defined(CONFIG_DEBUG_FS) .debugfs_init = radeon_debugfs_init, .debugfs_cleanup = radeon_debugfs_cleanup, diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 041943df966b..e4e64a80b58d 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -713,7 +713,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) * DIG1/2 can drive UNIPHY0/1/2 link A or link B * * DCE 4.0 - * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B). + * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). * Supports up to 6 digital outputs * - 6 DIG encoder blocks. * - DIG to PHY mapping is hardcoded @@ -724,6 +724,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) * DIG5 drives UNIPHY2 link A, A+B * DIG6 drives UNIPHY2 link B * + * DCE 4.1 + * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). + * Supports up to 6 digital outputs + * - 2 DIG encoder blocks. 
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B + * * Routing * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) * Examples: @@ -904,9 +910,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else args.v3.ucLaneNum = 4; - if (dig->linkb) { - args.v3.acConfig.ucLinkSel = 1; - args.v3.acConfig.ucEncoderSel = 1; + if (ASIC_IS_DCE41(rdev)) { + args.v3.acConfig.ucEncoderSel = dig->dig_encoder; + if (dig->linkb) + args.v3.acConfig.ucLinkSel = 1; + } else { + if (dig->linkb) { + args.v3.acConfig.ucLinkSel = 1; + args.v3.acConfig.ucEncoderSel = 1; + } } /* Select the PLL for the PHY @@ -1044,6 +1056,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action) union external_encoder_control { EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; + EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3; }; static void @@ -1054,6 +1067,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); union external_encoder_control args; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); @@ -1061,6 +1075,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, int dp_clock = 0; int dp_lane_count = 0; int connector_object_id = 0; + u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -1099,6 +1114,37 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, else args.v1.sDigEncoder.ucLaneNum = 4; break; + case 3: + args.v3.sExtEncoder.ucAction = action; + if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) + args.v3.sExtEncoder.usConnectorId = connector_object_id; + else + args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { + if (dp_clock == 270000) + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; + else if (dp_clock == 540000) + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; + args.v3.sExtEncoder.ucLaneNum = dp_lane_count; + } else if (radeon_encoder->pixel_clock > 165000) + args.v3.sExtEncoder.ucLaneNum = 8; + else + args.v3.sExtEncoder.ucLaneNum = 4; + switch (ext_enum) { + case GRAPH_OBJECT_ENUM_ID1: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; + break; + case GRAPH_OBJECT_ENUM_ID2: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; + break; + case GRAPH_OBJECT_ENUM_ID3: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; + break; + } + args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; @@ -1289,12 +1335,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: default: - action = ATOM_ENABLE; + if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) + action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; + else + action = ATOM_ENABLE; break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - action = ATOM_DISABLE; + if 
(ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP))
+ action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
+ else
+ action = ATOM_DISABLE;
 break;
 }
 atombios_external_encoder_setup(encoder, ext_encoder, action);
@@ -1483,6 +1535,11 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
 struct radeon_encoder_atom_dig *dig;
 uint32_t dig_enc_in_use = 0;
+ /* on DCE41 an encoder can drive any phy, so just use the crtc id */
+ if (ASIC_IS_DCE41(rdev)) {
+ return radeon_crtc->crtc_id;
+ }
+
 if (ASIC_IS_DCE4(rdev)) {
 dig = radeon_encoder->enc_priv;
 switch (radeon_encoder->encoder_id) {
@@ -1610,7 +1667,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 }
 if (ext_encoder) {
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
 }
 atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -2029,6 +2092,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
 case ENCODER_OBJECT_ID_TITFP513:
 case ENCODER_OBJECT_ID_VT1623:
 case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
 /* these are handled by the primary encoders */
 radeon_encoder->is_ext_encoder = true;
 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index e329066dcabd..4c222d5437d1 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -80,6 +80,7 @@ enum radeon_family {
 CHIP_JUNIPER,
 CHIP_CYPRESS,
 CHIP_HEMLOCK,
+ CHIP_PALM,
 CHIP_LAST,
 };
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daacb281dfaf..171b0b2e3a64 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -38,6 +38,7 @@
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
@@ -57,6 +58,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 } else
 radeon_fence_ring_emit(rdev, fence);
+ trace_radeon_fence_emit(rdev->ddev, fence->seq);
 fence->emited = true;
 list_del(&fence->list);
 list_add_tail(&fence->list, &rdev->fence_drv.emited);
@@ -213,6 +215,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 retry:
 /* save current sequence used to check for GPU lockup */
 seq = rdev->fence_drv.last_seq;
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
 if (intr) {
 radeon_irq_kms_sw_irq_get(rdev);
 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -227,6 +230,7 @@ retry:
 radeon_fence_signaled(fence), timeout);
 radeon_irq_kms_sw_irq_put(rdev);
 }
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
 if (unlikely(!radeon_fence_signaled(fence))) {
 /* we were interrupted for some reason and fence isn't
 * isn't signaled yet, resume wait
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7ed14f5..c6861bb751ad 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -71,8 +71,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 rdev->irq.gui_idle =
false; for (i = 0; i < rdev->num_crtc; i++) rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) + for (i = 0; i < 6; i++) { rdev->irq.hpd[i] = false; + rdev->irq.pflip[i] = false; + } radeon_irq_set(rdev); /* Clear bits */ radeon_irq_process(rdev); @@ -101,8 +103,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) rdev->irq.gui_idle = false; for (i = 0; i < rdev->num_crtc; i++) rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) + for (i = 0; i < 6; i++) { rdev->irq.hpd[i] = false; + rdev->irq.pflip[i] = false; + } radeon_irq_set(rdev); } @@ -121,7 +125,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) * chips. Disable MSI on them for now. */ if ((rdev->family >= CHIP_RV380) && - (!(rdev->flags & RADEON_IS_IGP)) && + ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) && (!(rdev->flags & RADEON_IS_AGP))) { int ret = pci_enable_msi(rdev->pdev); if (!ret) { @@ -175,3 +179,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); } +void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) +{ + unsigned long irqflags; + + if (crtc < 0 || crtc >= rdev->num_crtc) + return; + + spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); + if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) { + rdev->irq.pflip[crtc] = true; + radeon_irq_set(rdev); + } + spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); +} + +void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) +{ + unsigned long irqflags; + + if (crtc < 0 || crtc >= rdev->num_crtc) + return; + + spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); + BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0); + if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) { + rdev->irq.pflip[crtc] = false; + radeon_irq_set(rdev); + } + spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); +} + diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8fbbe1c6ebbd..4bf423ca4c12 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -277,6 +277,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) radeon_irq_set(rdev); } +int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags) +{ + struct drm_crtc *drmcrtc; + struct radeon_device *rdev = dev->dev_private; + + if (crtc < 0 || crtc >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + /* Get associated drm_crtc: */ + drmcrtc = &rdev->mode_info.crtcs[crtc]->base; + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, + vblank_time, flags, + drmcrtc); +} /* * IOCTL. 
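
[Editorial note] The radeon_get_vblank_timestamp_kms() hunk above is the whole driver-side contract for the new vblank timestamping path: the driver only resolves the crtc index to its drm_crtc and defers to the DRM core helper, which in turn samples the .get_scanout_position hook registered in radeon_drv.c and converts the scanline position into a timestamp using the crtc's hwmode timings. A minimal sketch of that wiring for a hypothetical driver follows; the mydrv_* names are illustrative, only the callback signatures are taken from this diff, and error handling is elided.

static int mydrv_get_vblank_timestamp(struct drm_device *dev, int crtc,
                                      int *max_error,
                                      struct timeval *vblank_time,
                                      unsigned flags)
{
        /* Hypothetical per-driver lookup; radeon uses
         * rdev->mode_info.crtcs[crtc]->base here. */
        struct drm_crtc *drmcrtc = mydrv_crtc_from_index(dev, crtc);

        /* The DRM core helper does all the work: it queries the driver's
         * .get_scanout_position hook and turns the result into a
         * corrected vblank timestamp within *max_error. */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
                                                     vblank_time, flags,
                                                     drmcrtc);
}

static struct drm_driver mydrv_kms_driver = {
        /* ... other fields elided ... */
        .get_vblank_timestamp = mydrv_get_vblank_timestamp,
        /* must return the DRM_SCANOUTPOS_* flags, as
         * radeon_get_crtc_scanoutpos() does above */
        .get_scanout_position = mydrv_get_scanoutpos,
};
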
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index e301c6f9e059..f406f02bf14e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -277,6 +277,9 @@ struct radeon_crtc { fixed20_12 hsc; struct drm_display_mode native_mode; int pll_id; + /* page flipping */ + struct radeon_unpin_work *unpin_work; + int deferred_flip_completion; }; struct radeon_encoder_primary_dac { @@ -442,10 +445,6 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; -/* radeon_get_crtc_scanoutpos() return flags */ -#define RADEON_SCANOUTPOS_VALID (1 << 0) -#define RADEON_SCANOUTPOS_INVBL (1 << 1) -#define RADEON_SCANOUTPOS_ACCURATE (1 << 2) extern enum radeon_tv_std radeon_combios_get_tv_info(struct radeon_device *rdev); @@ -562,7 +561,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); -extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); +extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, + int *vpos, int *hpos); extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); extern struct edid * @@ -662,4 +662,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); void radeon_fb_output_poll_changed(struct radeon_device *rdev); + +void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); + #endif diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index a598d0049aa5..7d6b8e88f746 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -34,6 +34,7 @@ #include <drm/drmP.h> #include "radeon_drm.h" #include "radeon.h" +#include "radeon_trace.h" int radeon_ttm_init(struct radeon_device *rdev); @@ -146,6 +147,7 @@ retry: list_add_tail(&bo->list, &rdev->gem.objects); mutex_unlock(&bo->rdev->gem.mutex); } + trace_radeon_bo_create(bo); return 0; } @@ -302,34 +304,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj, struct list_head *head) { if (lobj->wdomain) { - list_add(&lobj->list, head); + list_add(&lobj->tv.head, head); } else { - list_add_tail(&lobj->list, head); - } -} - -int radeon_bo_list_reserve(struct list_head *head) -{ - struct radeon_bo_list *lobj; - int r; - - list_for_each_entry(lobj, head, list){ - r = radeon_bo_reserve(lobj->bo, false); - if (unlikely(r != 0)) - return r; - lobj->reserved = true; - } - return 0; -} - -void radeon_bo_list_unreserve(struct list_head *head) -{ - struct radeon_bo_list *lobj; - - list_for_each_entry(lobj, head, list) { - /* only unreserve object we successfully reserved */ - if (lobj->reserved && radeon_bo_is_reserved(lobj->bo)) - radeon_bo_unreserve(lobj->bo); + list_add_tail(&lobj->tv.head, head); } } @@ -340,14 +317,11 @@ int radeon_bo_list_validate(struct list_head *head) u32 domain; int r; - list_for_each_entry(lobj, head, list) { - lobj->reserved = false; - } - r = radeon_bo_list_reserve(head); + r = ttm_eu_reserve_buffers(head); if (unlikely(r != 0)) { return r; } - list_for_each_entry(lobj, head, list) { + list_for_each_entry(lobj, head, tv.head) { bo = lobj->bo; if (!bo->pin_count) { domain = lobj->wdomain ? 
lobj->wdomain : lobj->rdomain; @@ -370,25 +344,6 @@ int radeon_bo_list_validate(struct list_head *head) return 0; } -void radeon_bo_list_fence(struct list_head *head, void *fence) -{ - struct radeon_bo_list *lobj; - struct radeon_bo *bo; - struct radeon_fence *old_fence = NULL; - - list_for_each_entry(lobj, head, list) { - bo = lobj->bo; - spin_lock(&bo->tbo.lock); - old_fence = (struct radeon_fence *)bo->tbo.sync_obj; - bo->tbo.sync_obj = radeon_fence_ref(fence); - bo->tbo.sync_obj_arg = NULL; - spin_unlock(&bo->tbo.lock); - if (old_fence) { - radeon_fence_unref(&old_fence); - } - } -} - int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma) { diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index d143702b244a..22d4c237dea5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); if (unlikely(r != 0)) return r; - spin_lock(&bo->tbo.lock); + spin_lock(&bo->tbo.bdev->fence_lock); if (mem_type) *mem_type = bo->tbo.mem.mem_type; if (bo->tbo.sync_obj) r = ttm_bo_wait(&bo->tbo, true, true, no_wait); - spin_unlock(&bo->tbo.lock); + spin_unlock(&bo->tbo.bdev->fence_lock); ttm_bo_unreserve(&bo->tbo); return r; } @@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev); extern void radeon_bo_fini(struct radeon_device *rdev); extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, struct list_head *head); -extern int radeon_bo_list_reserve(struct list_head *head); -extern void radeon_bo_list_unreserve(struct list_head *head); extern int radeon_bo_list_validate(struct list_head *head); -extern void radeon_bo_list_fence(struct list_head *head, void *fence); extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma); extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 8c9b2ef32c68..4de7776bd1c5 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -449,6 +449,9 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, case THERMAL_TYPE_EVERGREEN: temp = evergreen_get_temp(rdev); break; + case THERMAL_TYPE_SUMO: + temp = sumo_get_temp(rdev); + break; default: temp = 0; break; @@ -487,6 +490,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev) case THERMAL_TYPE_RV6XX: case THERMAL_TYPE_RV770: case THERMAL_TYPE_EVERGREEN: + case THERMAL_TYPE_SUMO: rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); if (IS_ERR(rdev->pm.int_hwmon_dev)) { err = PTR_ERR(rdev->pm.int_hwmon_dev); @@ -720,9 +724,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) */ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { if (rdev->pm.active_crtcs & (1 << crtc)) { - vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); - if ((vbl_status & RADEON_SCANOUTPOS_VALID) && - !(vbl_status & RADEON_SCANOUTPOS_INVBL)) + vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); + if ((vbl_status & DRM_SCANOUTPOS_VALID) && + !(vbl_status & DRM_SCANOUTPOS_INVBL)) in_vbl = false; } } diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 64928814de53..0a310b7f71c3 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -422,6 +422,7 @@ # define RADEON_CRTC_CSYNC_EN (1 << 4) # 
define RADEON_CRTC_ICON_EN (1 << 15) # define RADEON_CRTC_CUR_EN (1 << 16) +# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17) # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) # define RADEON_CRTC_CUR_MODE_SHIFT 20 # define RADEON_CRTC_CUR_MODE_MONO 0 @@ -509,6 +510,8 @@ # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) +# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28) +# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29) #define R300_CRTC_TILE_X0_Y0 0x0350 #define R300_CRTC2_TILE_X0_Y0 0x0358 diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h new file mode 100644 index 000000000000..eafd8160a155 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -0,0 +1,82 @@ +#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _RADEON_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM radeon +#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) +#define TRACE_INCLUDE_FILE radeon_trace + +TRACE_EVENT(radeon_bo_create, + TP_PROTO(struct radeon_bo *bo), + TP_ARGS(bo), + TP_STRUCT__entry( + __field(struct radeon_bo *, bo) + __field(u32, pages) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->pages = bo->tbo.num_pages; + ), + TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) +); + +DECLARE_EVENT_CLASS(radeon_fence_request, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c new file mode 100644 index 000000000000..8175993df84d --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace_points.c @@ -0,0 +1,9 @@ +/* Copyright Red Hat Inc 2010. 
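
The radeon_trace.h header above defines one TRACE_EVENT plus a radeon_fence_request event class with four instances. As a sketch of how a driver path emits one of the generated tracepoints (the wrapper function here is invented for illustration; the real call sites are the trace_radeon_bo_create() hunk above and the fence code):

#include "radeon_trace.h"

/* Illustrative only. The tracepoint compiles to a static no-op until
 * enabled at runtime through the tracing debugfs, e.g.
 * tracing/events/radeon/radeon_fence_emit/enable. */
static void example_trace_fence_emit(struct drm_device *dev, u32 seqno)
{
	/* Generated by DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, ...) */
	trace_radeon_fence_emit(dev, seqno);
}
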
+ * Author : Dave Airlie <airlied@redhat.com> + */ +#include <drm/drmP.h> +#include "radeon_drm.h" +#include "radeon.h" + +#define CREATE_TRACE_POINTS +#include "radeon_trace.h" diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index f1c6e02c2e6b..9a85b1614c86 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -46,6 +46,56 @@ void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); +void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; + u32 tmp; + + /* make sure flip is at vb rather than hb */ + tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); + tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; + WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); + + /* set pageflip to happen anywhere in vblank interval */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + + /* enable the pflip int */ + radeon_irq_kms_pflip_irq_get(rdev, crtc); +} + +void rs600_post_page_flip(struct radeon_device *rdev, int crtc) +{ + /* disable the pflip int */ + radeon_irq_kms_pflip_irq_put(rdev, crtc); +} + +u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + + /* Lock the graphics update lock */ + tmp |= AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + /* Wait for update_pending to go high. */ + while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* Return current update_pending status: */ + return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; +} + void rs600_pm_misc(struct radeon_device *rdev) { int requested_index = rdev->pm.requested_power_state_index; @@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev) if (rdev->irq.gui_idle) { tmp |= S_000040_GUI_IDLE(1); } - if (rdev->irq.crtc_vblank_int[0]) { + if (rdev->irq.crtc_vblank_int[0] || + rdev->irq.pflip[0]) { mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); } - if (rdev->irq.crtc_vblank_int[1]) { + if (rdev->irq.crtc_vblank_int[1] || + rdev->irq.pflip[1]) { mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); } if (rdev->irq.hpd[0]) { @@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev) return 0; } -static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) +static inline u32 rs600_irq_ack(struct radeon_device *rdev) { uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); uint32_t irq_mask = S_000044_SW_INT(1); @@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ } if (G_000044_DISPLAY_INT_STAT(irqs)) { - *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { + rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { WREG32(R_006534_D1MODE_VBLANK_STATUS, S_006534_D1MODE_VBLANK_ACK(1)); } - if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { + if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { WREG32(R_006D34_D2MODE_VBLANK_STATUS, S_006D34_D2MODE_VBLANK_ACK(1)); } - if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); } - if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); } } else { - *r500_disp_int = 0; + rdev->irq.stat_regs.r500.disp_int = 0; } if (irqs) { @@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ void rs600_irq_disable(struct radeon_device *rdev) { - u32 tmp; - WREG32(R_000040_GEN_INT_CNTL, 0); WREG32(R_006540_DxMODE_INT_MASK, 0); /* Wait and acknowledge irq */ mdelay(1); - rs600_irq_ack(rdev, &tmp); + rs600_irq_ack(rdev); } int rs600_irq_process(struct radeon_device *rdev) { - uint32_t status, msi_rearm; - uint32_t r500_disp_int; + u32 status, msi_rearm; bool queue_hotplug = false; /* reset gui idle ack. 
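
rs600_page_flip() above waits for AVIVO_D1GRPH_SURFACE_UPDATE_PENDING with an unbounded busy-wait. Purely as a hedged sketch of an alternative (not what this merge does), a bounded variant could look like this; the iteration limit and 1 us poll interval are arbitrary assumptions:

#include <linux/delay.h>

/* Hypothetical bounded version of the update_pending wait. */
static u32 rs600_wait_update_pending(struct radeon_device *rdev,
				     struct radeon_crtc *radeon_crtc)
{
	unsigned int i;

	for (i = 0; i < 10000; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
		    AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;			/* flip latched by the CRTC */
		udelay(1);
	}
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
	       AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
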
the status bit is broken */ rdev->irq.gui_idle_acked = false; - status = rs600_irq_ack(rdev, &r500_disp_int); - if (!status && !r500_disp_int) { + status = rs600_irq_ack(rdev); + if (!status && !rdev->irq.stat_regs.r500.disp_int) { return IRQ_NONE; } - while (status || r500_disp_int) { + while (status || rdev->irq.stat_regs.r500.disp_int) { /* SW interrupt */ - if (G_000044_SW_INT(status)) + if (G_000044_SW_INT(status)) { radeon_fence_process(rdev); + } /* GUI idle */ if (G_000040_GUI_IDLE(status)) { rdev->irq.gui_idle_acked = true; @@ -611,25 +661,33 @@ int rs600_irq_process(struct radeon_device *rdev) wake_up(&rdev->irq.idle_queue); } /* Vertical blank interrupts */ - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); } - if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); + if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); } - if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { queue_hotplug = true; DRM_DEBUG("HPD1\n"); } - if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { queue_hotplug = true; DRM_DEBUG("HPD2\n"); } - status = rs600_irq_ack(rdev, &r500_disp_int); + status = rs600_irq_ack(rdev); } /* reset gui idle ack. the status bit is broken */ rdev->irq.gui_idle_acked = false; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4dfead8cee33..645aa1fd7611 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -42,6 +42,40 @@ static void rv770_gpu_init(struct radeon_device *rdev); void rv770_fini(struct radeon_device *rdev); +u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + + /* Lock the graphics update lock */ + tmp |= AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + if (radeon_crtc->crtc_id) { + WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + } else { + WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + } + WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + /* Wait for update_pending to go high. */ + while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* Return current update_pending status: */ + return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; +} + /* get temperature in millidegrees */ u32 rv770_get_temp(struct radeon_device *rdev) { @@ -489,6 +523,49 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } +static void rv770_program_channel_remap(struct radeon_device *rdev) +{ + u32 tcp_chan_steer, mc_shared_chremap, tmp; + bool force_no_swizzle; + + switch (rdev->family) { + case CHIP_RV770: + case CHIP_RV730: + force_no_swizzle = false; + break; + case CHIP_RV710: + case CHIP_RV740: + default: + force_no_swizzle = true; + break; + } + + tmp = RREG32(MC_SHARED_CHMAP); + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + case 1: + default: + /* default mapping */ + mc_shared_chremap = 0x00fac688; + break; + case 2: + case 3: + if (force_no_swizzle) + mc_shared_chremap = 0x00fac688; + else + mc_shared_chremap = 0x00bbc298; + break; + } + + if (rdev->family == CHIP_RV740) + tcp_chan_steer = 0x00ef2a60; + else + tcp_chan_steer = 0x00fac688; + + WREG32(TCP_CHAN_STEER, tcp_chan_steer); + WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); +} + static void rv770_gpu_init(struct radeon_device *rdev) { int i, j, num_qd_pipes; @@ -688,6 +765,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); + rv770_program_channel_remap(rdev); + WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); @@ -956,6 +1035,45 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev) radeon_bo_unref(&rdev->vram_scratch.robj); } +void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) +{ + u64 size_bf, size_af; + + if (mc->mc_vram_size > 0xE0000000) { + /* leave room for at least 512M GTT */ + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = 0xE0000000; + mc->mc_vram_size = 0xE0000000; + } + if (rdev->flags & RADEON_IS_AGP) { + size_bf = mc->gtt_start; + size_af = 0xFFFFFFFF - mc->gtt_end + 1; + if (size_bf > size_af) { + if (mc->mc_vram_size > size_bf) { + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = size_bf; + mc->mc_vram_size = size_bf; + } + mc->vram_start = mc->gtt_start - mc->mc_vram_size; + } else { + if (mc->mc_vram_size > size_af) { + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = size_af; + mc->mc_vram_size = size_af; + } + mc->vram_start = mc->gtt_end; + } + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", + mc->mc_vram_size >> 20, mc->vram_start, + mc->vram_end, mc->real_vram_size >> 20); + } else { + radeon_vram_location(rdev, &rdev->mc, 0); + rdev->mc.gtt_base_align = 0; + radeon_gtt_location(rdev, mc); + } +} + int rv770_mc_init(struct radeon_device *rdev) { u32 tmp; @@ -996,7 +1114,7 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; rdev->mc.active_vram_size = rdev->mc.visible_vram_size; - r600_vram_gtt_location(rdev, &rdev->mc); + r700_vram_gtt_location(rdev, 
&rdev->mc); radeon_update_bandwidth_info(rdev); return 0; diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index b7a5a20e81dc..fc77e1e1a179 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -138,6 +138,7 @@ #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 +#define MC_SHARED_CHREMAP 0x2008 #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 @@ -303,6 +304,7 @@ #define BILINEAR_PRECISION_8_BIT (1 << 31) #define TCP_CNTL 0x9610 +#define TCP_CHAN_STEER 0x9614 #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x)<<0) @@ -351,4 +353,11 @@ #define SRBM_STATUS 0x0E50 +#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 +#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 +#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 +#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 +#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c +#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c + #endif diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 148a322d8f5d..cf2ec562550e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) } EXPORT_SYMBOL(ttm_bo_wait_unreserved); -static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; @@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) } } -/** - * Call with the lru_lock held. - */ - -static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) +int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { int put_count = 0; @@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, /** * Deadlock avoidance for multi-bo reserving. */ - if (use_sequence && bo->seq_valid && - (sequence - bo->val_seq < (1 << 31))) { - return -EAGAIN; + if (use_sequence && bo->seq_valid) { + /** + * We've already reserved this one. + */ + if (unlikely(sequence == bo->val_seq)) + return -EDEADLK; + /** + * Already reserved by a thread that will not back + * off for us. We need to back off. + */ + if (unlikely(sequence - bo->val_seq < (1 << 31))) + return -EAGAIN; } if (no_wait) @@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref) BUG(); } +void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, + bool never_free) +{ + kref_sub(&bo->list_kref, count, + (never_free) ? 
ttm_bo_ref_bug : ttm_bo_release_list); +} + int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) @@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); return ret; } +void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) +{ + ttm_bo_add_to_lru(bo); + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); +} + void ttm_bo_unreserve(struct ttm_buffer_object *bo) { struct ttm_bo_global *glob = bo->glob; spin_lock(&glob->lru_lock); - ttm_bo_add_to_lru(bo); - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ttm_bo_unreserve_locked(bo); spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_bo_unreserve); @@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, int ret = 0; if (old_is_pci || new_is_pci || - ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) - ttm_bo_unmap_virtual(bo); + ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { + ret = ttm_mem_io_lock(old_man, true); + if (unlikely(ret != 0)) + goto out_err; + ttm_bo_unmap_virtual_locked(bo); + ttm_mem_io_unlock(old_man); + } /* * Create and bind a ttm if required. @@ -416,11 +437,9 @@ moved: } if (bo->mem.mm_node) { - spin_lock(&bo->lock); bo->offset = (bo->mem.start << PAGE_SHIFT) + bdev->man[bo->mem.mem_type].gpu_offset; bo->cur_placement = bo->mem.placement; - spin_unlock(&bo->lock); } else bo->offset = 0; @@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } - ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); @@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) int put_count; int ret; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, true); if (!bo->sync_obj) { spin_lock(&glob->lru_lock); /** - * Lock inversion between bo::reserve and bo::lock here, + * Lock inversion between bo:reserve and bdev::fence_lock here, * but that's OK, since we're only trylocking. 
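
The comment above justifies a trylock by pointing at the reserve vs. fence_lock ordering. As a sketch of why the trylock is safe in this TTM version (the reservation is an atomic flag, so a try-acquire never sleeps under the spinlock; the helper name is invented):

/* With bdev->fence_lock held, only a non-blocking reserve is legal,
 * since the established order is bo:reserve -> bdev::fence_lock. */
static bool try_reserve_under_fence_lock(struct ttm_buffer_object *bo)
{
	return atomic_cmpxchg(&bo->reserved, 0, 1) == 0;
}
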
*/ @@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) if (unlikely(ret == -EBUSY)) goto queue; - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); return; } else { @@ -512,7 +529,7 @@ queue: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (sync_obj) { driver->sync_obj_flush(sync_obj, sync_obj_arg); @@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool no_wait_reserve, bool no_wait_gpu) { + struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; int put_count; int ret = 0; retry: - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) return ret; @@ -580,8 +598,7 @@ retry: spin_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); return 0; } @@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; if (likely(bo->vm_node != NULL)) { rb_erase(&bo->vm_rb, &bdev->addr_space_rb); @@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref) bo->vm_node = NULL; } write_unlock(&bdev->vm_lock); + ttm_mem_io_lock(man, false); + ttm_mem_io_free_vm(bo); + ttm_mem_io_unlock(man); ttm_bo_cleanup_refs_or_queue(bo); kref_put(&bo->list_kref, ttm_bo_release_list); write_lock(&bdev->vm_lock); @@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, struct ttm_placement placement; int ret = 0; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) { @@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.bus.io_reserved = false; + evict_mem.bus.io_reserved_vm = false; + evict_mem.bus.io_reserved_count = 0; placement.fpfn = 0; placement.lpfn = 0; @@ -802,8 +824,7 @@ retry: BUG_ON(ret != 0); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); ttm_bo_unreserve(bo); @@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, { int ret = 0; struct ttm_mem_reg mem; + struct ttm_bo_device *bdev = bo->bdev; BUG_ON(!atomic_read(&bo->reserved)); @@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * Have the driver move function wait for idle when necessary, * instead of doing it here. 
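
These hunks convert call sites from the removed per-bo bo->lock to the device-wide bdev->fence_lock. The recurring wait-for-idle pattern, restated as a sketch (the wrapper name is invented; the arguments match how ttm_bo_wait() is invoked throughout this file):

static int bo_wait_idle(struct ttm_buffer_object *bo, bool interruptible)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, false);
	spin_unlock(&bdev->fence_lock);
	return ret;
}
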
*/ - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (ret) return ret; mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; - mem.bus.io_reserved = false; + mem.bus.io_reserved_vm = false; + mem.bus.io_reserved_count = 0; /* * Determine where to move the buffer. */ @@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, } bo->destroy = destroy; - spin_lock_init(&bo->lock); kref_init(&bo->kref); kref_init(&bo->list_kref); atomic_set(&bo->cpu_writers, 0); @@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); + INIT_LIST_HEAD(&bo->io_reserve_lru); bo->bdev = bdev; bo->glob = bdev->glob; bo->type = type; @@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; - bo->mem.bus.io_reserved = false; + bo->mem.bus.io_reserved_vm = false; + bo->mem.bus.io_reserved_count = 0; bo->buffer_start = buffer_start & PAGE_MASK; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); @@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, BUG_ON(type >= TTM_NUM_MEM_TYPES); man = &bdev->man[type]; BUG_ON(man->has_type); + man->io_reserve_fastpath = true; + man->use_io_reserve_lru = false; + mutex_init(&man->io_reserve_mutex); + INIT_LIST_HEAD(&man->io_reserve_lru); ret = bdev->driver->init_mem_type(bdev, type, man); if (ret) @@ -1527,7 +1555,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->dev_mapping = NULL; bdev->glob = glob; bdev->need_dma32 = need_dma32; - + bdev->val_seq = 0; + spin_lock_init(&bdev->fence_lock); mutex_lock(&glob->device_list_mutex); list_add_tail(&bdev->device_list, &glob->device_list); mutex_unlock(&glob->device_list_mutex); @@ -1561,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) return true; } -void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; loff_t offset = (loff_t) bo->addr_space_offset; @@ -1570,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) if (!bdev->dev_mapping) return; unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); - ttm_mem_io_free(bdev, &bo->mem); + ttm_mem_io_free_vm(bo); +} + +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; + + ttm_mem_io_lock(man, false); + ttm_bo_unmap_virtual_locked(bo); + ttm_mem_io_unlock(man); } + + EXPORT_SYMBOL(ttm_bo_unmap_virtual); static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) @@ -1651,6 +1692,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) { struct ttm_bo_driver *driver = bo->bdev->driver; + struct ttm_bo_device *bdev = bo->bdev; void *sync_obj; void *sync_obj_arg; int ret = 0; @@ -1664,9 +1706,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&tmp_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); continue; } @@ -1675,29 
+1717,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, sync_obj = driver->sync_obj_ref(bo->sync_obj); sync_obj_arg = bo->sync_obj_arg; - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, lazy, interruptible); if (unlikely(ret != 0)) { driver->sync_obj_unref(&sync_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); return ret; } - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); if (likely(bo->sync_obj == sync_obj && bo->sync_obj_arg == sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&sync_obj); driver->sync_obj_unref(&tmp_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); } else { - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&sync_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); } } return 0; @@ -1706,6 +1748,7 @@ EXPORT_SYMBOL(ttm_bo_wait); int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) { + struct ttm_bo_device *bdev = bo->bdev; int ret = 0; /* @@ -1715,9 +1758,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) ret = ttm_bo_reserve(bo, true, no_wait, false, 0); if (unlikely(ret != 0)) return ret; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, true, no_wait); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (likely(ret == 0)) atomic_inc(&bo->cpu_writers); ttm_bo_unreserve(bo); @@ -1783,16 +1826,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); /** * Wait for GPU, then move to system cached. 
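
ttm_bo_list_ref_sub(), added earlier in this file, collapses the old one-at-a-time kref_put() loop into a single kref_sub() call. A sketch of the replacement idiom (the wrapper exists only for illustration):

static void drop_lru_refs(struct ttm_buffer_object *bo, int put_count)
{
	/* Old pattern: while (put_count--)
	 *                      kref_put(&bo->list_kref, ttm_bo_ref_bug);
	 * New pattern: one atomic subtraction with the same release
	 * semantics. never_free == true selects ttm_bo_ref_bug(),
	 * asserting the refcount cannot reach zero on this path. */
	ttm_bo_list_ref_sub(bo, put_count, true);
}
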
*/ - spin_lock(&bo->lock); + spin_lock(&bo->bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bo->lock); + spin_unlock(&bo->bdev->fence_lock); if (unlikely(ret != 0)) goto out; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3106d5bcce32..77dbf408c0d0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_ttm); -int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) { - int ret; + if (likely(man->io_reserve_fastpath)) + return 0; + + if (interruptible) + return mutex_lock_interruptible(&man->io_reserve_mutex); + + mutex_lock(&man->io_reserve_mutex); + return 0; +} - if (!mem->bus.io_reserved) { - mem->bus.io_reserved = true; +void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) +{ + if (likely(man->io_reserve_fastpath)) + return; + + mutex_unlock(&man->io_reserve_mutex); +} + +static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) +{ + struct ttm_buffer_object *bo; + + if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru)) + return -EAGAIN; + + bo = list_first_entry(&man->io_reserve_lru, + struct ttm_buffer_object, + io_reserve_lru); + list_del_init(&bo->io_reserve_lru); + ttm_bo_unmap_virtual_locked(bo); + + return 0; +} + +static int ttm_mem_io_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + int ret = 0; + + if (!bdev->driver->io_mem_reserve) + return 0; + if (likely(man->io_reserve_fastpath)) + return bdev->driver->io_mem_reserve(bdev, mem); + + if (bdev->driver->io_mem_reserve && + mem->bus.io_reserved_count++ == 0) { +retry: ret = bdev->driver->io_mem_reserve(bdev, mem); + if (ret == -EAGAIN) { + ret = ttm_mem_io_evict(man); + if (ret == 0) + goto retry; + } + } + return ret; +} + +static void ttm_mem_io_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + + if (likely(man->io_reserve_fastpath)) + return; + + if (bdev->driver->io_mem_reserve && + --mem->bus.io_reserved_count == 0 && + bdev->driver->io_mem_free) + bdev->driver->io_mem_free(bdev, mem); + +} + +int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) +{ + struct ttm_mem_reg *mem = &bo->mem; + int ret; + + if (!mem->bus.io_reserved_vm) { + struct ttm_mem_type_manager *man = + &bo->bdev->man[mem->mem_type]; + + ret = ttm_mem_io_reserve(bo->bdev, mem); if (unlikely(ret != 0)) return ret; + mem->bus.io_reserved_vm = true; + if (man->use_io_reserve_lru) + list_add_tail(&bo->io_reserve_lru, + &man->io_reserve_lru); } return 0; } -void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) { - if (bdev->driver->io_mem_reserve) { - if (mem->bus.io_reserved) { - mem->bus.io_reserved = false; - bdev->driver->io_mem_free(bdev, mem); - } + struct ttm_mem_reg *mem = &bo->mem; + + if (mem->bus.io_reserved_vm) { + mem->bus.io_reserved_vm = false; + list_del_init(&bo->io_reserve_lru); + ttm_mem_io_free(bo->bdev, mem); } } int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void **virtual) { + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; int ret; void *addr; *virtual = NULL; + (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bdev, mem); + 
ttm_mem_io_unlock(man); if (ret || !mem->bus.is_iomem) return ret; @@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, else addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); if (!addr) { + (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); + ttm_mem_io_unlock(man); return -ENOMEM; } } @@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, if (virtual && mem->bus.addr == NULL) iounmap(virtual); + (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); + ttm_mem_io_unlock(man); } static int ttm_copy_io_page(void *dst, void *src, unsigned long page) @@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg old_copy = *old_mem; + struct ttm_mem_reg old_copy; void *old_iomap; void *new_iomap; int ret; @@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, } mb(); out2: - ttm_bo_free_old_node(bo); - + old_copy = *old_mem; *old_mem = *new_mem; new_mem->mm_node = NULL; @@ -292,9 +381,10 @@ out2: } out1: - ttm_mem_reg_iounmap(bdev, new_mem, new_iomap); + ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); out: ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); + ttm_bo_mem_put(bo, &old_copy); return ret; } EXPORT_SYMBOL(ttm_bo_move_memcpy); @@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ - spin_lock_init(&fbo->lock); init_waitqueue_head(&fbo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); + INIT_LIST_HEAD(&fbo->io_reserve_lru); fbo->vm_node = NULL; atomic_set(&fbo->cpu_writers, 0); @@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { + struct ttm_mem_type_manager *man = + &bo->bdev->man[bo->mem.mem_type]; unsigned long offset, size; int ret; @@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) return -EPERM; #endif + (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); + ttm_mem_io_unlock(man); if (ret) return ret; if (!bo->mem.bus.is_iomem) { @@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { + struct ttm_buffer_object *bo = map->bo; + struct ttm_mem_type_manager *man = + &bo->bdev->man[bo->mem.mem_type]; + if (!map->virtual) return; switch (map->bo_kmap_type) { case ttm_bo_map_iomap: iounmap(map->virtual); - ttm_mem_io_free(map->bo->bdev, &map->bo->mem); break; case ttm_bo_map_vmap: vunmap(map->virtual); @@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) default: BUG(); } + (void) ttm_mem_io_lock(man, false); + ttm_mem_io_free(map->bo->bdev, &map->bo->mem); + ttm_mem_io_unlock(man); map->virtual = NULL; map->page = NULL; } @@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_buffer_object *ghost_obj; void *tmp_obj = NULL; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); if (bo->sync_obj) { tmp_obj = bo->sync_obj; bo->sync_obj = NULL; @@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bo->sync_obj_arg = sync_obj_arg; if (evict) { ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bo->lock); + 
spin_unlock(&bdev->fence_lock); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); if (ret) @@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, */ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index fe6cb77899f4..221b924acebe 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int i; unsigned long address = (unsigned long)vmf->virtual_address; int retval = VM_FAULT_NOPAGE; + struct ttm_mem_type_manager *man = + &bdev->man[bo->mem.mem_type]; /* * Work around locking order reversal in fault / nopfn @@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * move. */ - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { ret = ttm_bo_wait(bo, false, true, false); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { retval = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; goto out_unlock; } } else - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); - - ret = ttm_mem_io_reserve(bdev, &bo->mem); - if (ret) { - retval = VM_FAULT_SIGBUS; + ret = ttm_mem_io_lock(man, true); + if (unlikely(ret != 0)) { + retval = VM_FAULT_NOPAGE; goto out_unlock; } + ret = ttm_mem_io_reserve_vm(bo); + if (unlikely(ret != 0)) { + retval = VM_FAULT_SIGBUS; + goto out_io_unlock; + } page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + bo->vm_node->start - vma->vm_pgoff; @@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (unlikely(page_offset >= bo->num_pages)) { retval = VM_FAULT_SIGBUS; - goto out_unlock; + goto out_io_unlock; } /* @@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) page = ttm_tt_get_page(ttm, page_offset); if (unlikely(!page && i == 0)) { retval = VM_FAULT_OOM; - goto out_unlock; + goto out_io_unlock; } else if (unlikely(!page)) { break; } @@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) else if (unlikely(ret != 0)) { retval = (ret == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS; - goto out_unlock; + goto out_io_unlock; } address += PAGE_SIZE; if (unlikely(++page_offset >= page_last)) break; } - +out_io_unlock: + ttm_mem_io_unlock(man); out_unlock: ttm_bo_unreserve(bo); return retval; diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index c285c2902d15..3832fe10b4df 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -32,7 +32,7 @@ #include <linux/sched.h> #include <linux/module.h> -void ttm_eu_backoff_reservation(struct list_head *list) +static void ttm_eu_backoff_reservation_locked(struct list_head *list) { struct ttm_validate_buffer *entry; @@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list) if (!entry->reserved) continue; + if (entry->removed) { + ttm_bo_add_to_lru(bo); + entry->removed = false; + + } entry->reserved = false; - ttm_bo_unreserve(bo); + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + } +} + +static void ttm_eu_del_from_lru_locked(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + if (!entry->reserved) + continue; + + if (!entry->removed) { + entry->put_count = ttm_bo_del_from_lru(bo); + entry->removed = true; + } } } + +static void ttm_eu_list_ref_sub(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + + if (entry->put_count) { + ttm_bo_list_ref_sub(bo, entry->put_count, true); + entry->put_count = 0; + } + } +} + +static int ttm_eu_wait_unreserved_locked(struct list_head *list, + struct ttm_buffer_object *bo) +{ + struct ttm_bo_global *glob = bo->glob; + int ret; + + ttm_eu_del_from_lru_locked(list); + spin_unlock(&glob->lru_lock); + ret = ttm_bo_wait_unreserved(bo, true); + spin_lock(&glob->lru_lock); + if (unlikely(ret != 0)) + ttm_eu_backoff_reservation_locked(list); + return ret; +} + + +void ttm_eu_backoff_reservation(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + struct ttm_bo_global *glob; + + if (list_empty(list)) + return; + + entry = list_first_entry(list, struct ttm_validate_buffer, head); + glob = entry->bo->glob; + spin_lock(&glob->lru_lock); + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); +} EXPORT_SYMBOL(ttm_eu_backoff_reservation); /* @@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); * buffers in different orders. 
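
With the next hunk dropping the val_seq argument (the sequence now comes from bdev->val_seq under the lru lock), a driver submission path reduces to the sketch below. validate_and_submit() is a stand-in for driver-specific work, in the shape of radeon_bo_list_validate() earlier in this merge:

static int submit_buffers(struct list_head *validate_list, void *fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(validate_list);
	if (unlikely(ret != 0))
		return ret;

	ret = validate_and_submit(validate_list);	/* driver-specific */
	if (unlikely(ret != 0)) {
		ttm_eu_backoff_reservation(validate_list);
		return ret;
	}

	ttm_eu_fence_buffer_objects(validate_list, fence);
	return 0;
}
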
*/ -int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) +int ttm_eu_reserve_buffers(struct list_head *list) { + struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; int ret; + uint32_t val_seq; + + if (list_empty(list)) + return 0; + + list_for_each_entry(entry, list, head) { + entry->reserved = false; + entry->put_count = 0; + entry->removed = false; + } + + entry = list_first_entry(list, struct ttm_validate_buffer, head); + glob = entry->bo->glob; retry: + spin_lock(&glob->lru_lock); + val_seq = entry->bo->bdev->val_seq++; + list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - entry->reserved = false; - ret = ttm_bo_reserve(bo, true, false, true, val_seq); - if (ret != 0) { - ttm_eu_backoff_reservation(list); - if (ret == -EAGAIN) { - ret = ttm_bo_wait_unreserved(bo, true); - if (unlikely(ret != 0)) - return ret; - goto retry; - } else +retry_this_bo: + ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq); + switch (ret) { + case 0: + break; + case -EBUSY: + ret = ttm_eu_wait_unreserved_locked(list, bo); + if (unlikely(ret != 0)) { + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); return ret; + } + goto retry_this_bo; + case -EAGAIN: + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); + ret = ttm_bo_wait_unreserved(bo, true); + if (unlikely(ret != 0)) + return ret; + goto retry; + default: + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); + return ret; } entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { - ttm_eu_backoff_reservation(list); + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); ret = ttm_bo_wait_cpu(bo, false); if (ret) return ret; goto retry; } } + + ttm_eu_del_from_lru_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); + return 0; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); @@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers); void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) { struct ttm_validate_buffer *entry; + struct ttm_buffer_object *bo; + struct ttm_bo_global *glob; + struct ttm_bo_device *bdev; + struct ttm_bo_driver *driver; - list_for_each_entry(entry, list, head) { - struct ttm_buffer_object *bo = entry->bo; - struct ttm_bo_driver *driver = bo->bdev->driver; - void *old_sync_obj; + if (list_empty(list)) + return; + + bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; + bdev = bo->bdev; + driver = bdev->driver; + glob = bo->glob; - spin_lock(&bo->lock); - old_sync_obj = bo->sync_obj; + spin_lock(&bdev->fence_lock); + spin_lock(&glob->lru_lock); + + list_for_each_entry(entry, list, head) { + bo = entry->bo; + entry->old_sync_obj = bo->sync_obj; bo->sync_obj = driver->sync_obj_ref(sync_obj); bo->sync_obj_arg = entry->new_sync_obj_arg; - spin_unlock(&bo->lock); - ttm_bo_unreserve(bo); + ttm_bo_unreserve_locked(bo); entry->reserved = false; - if (old_sync_obj) - driver->sync_obj_unref(&old_sync_obj); + } + spin_unlock(&glob->lru_lock); + spin_unlock(&bdev->fence_lock); + + list_for_each_entry(entry, list, head) { + if (entry->old_sync_obj) + driver->sync_obj_unref(&entry->old_sync_obj); } } EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e7a58d055041..10fc01f69c40 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ 
-264,7 +264,6 @@ struct vmw_private { */ struct vmw_sw_context ctx; - uint32_t val_seq; struct mutex cmdbuf_mutex; /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 76954e3528c1..41b95ed6dbcd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); if (unlikely(ret != 0)) goto out_err; - ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes, - dev_priv->val_seq++); + ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); if (unlikely(ret != 0)) goto out_err; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 628f76772d22..0f14f94ed8f4 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -683,6 +683,21 @@ struct drm_master { void *driver_priv; /**< Private structure for driver to use */ }; +/* Size of ringbuffer for vblank timestamps. Just double-buffer + * in initial implementation. + */ +#define DRM_VBLANKTIME_RBSIZE 2 + +/* Flags and return codes for get_vblank_timestamp() driver function. */ +#define DRM_CALLED_FROM_VBLIRQ 1 +#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) +#define DRM_VBLANKTIME_INVBL (1 << 1) + +/* get_scanout_position() return flags */ +#define DRM_SCANOUTPOS_VALID (1 << 0) +#define DRM_SCANOUTPOS_INVBL (1 << 1) +#define DRM_SCANOUTPOS_ACCURATE (1 << 2) + /** * DRM driver structure. This structure represent the common code for * a family of cards. There will one drm_device for each card present @@ -760,6 +775,68 @@ struct drm_driver { */ int (*device_is_agp) (struct drm_device *dev); + /** + * Called by vblank timestamping code. + * + * Return the current display scanout position from a crtc. + * + * \param dev DRM device. + * \param crtc Id of the crtc to query. + * \param *vpos Target location for current vertical scanout position. + * \param *hpos Target location for current horizontal scanout position. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * \return Flags, or'ed together as follows: + * + * DRM_SCANOUTPOS_VALID = Query successful. + * DRM_SCANOUTPOS_INVBL = Inside vblank. + * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of + * this flag means that returned position may be offset by a constant + * but unknown small number of scanlines wrt. real scanout position. + * + */ + int (*get_scanout_position) (struct drm_device *dev, int crtc, + int *vpos, int *hpos); + + /** + * Called by \c drm_get_last_vbltimestamp. Should return a precise + * timestamp when the most recent VBLANK interval ended or will end. + * + * Specifically, the timestamp in @vblank_time should correspond as + * closely as possible to the time when the first video scanline of + * the video frame after the end of VBLANK will start scanning out, + * the time immediately after end of the VBLANK interval. If the + * @crtc is currently inside VBLANK, this will be a time in the future. + * If the @crtc is currently scanning out a frame, this will be the + * past start time of the current scanout. This is meant to adhere + * to the OpenML OML_sync_control extension specification. + * + * \param dev DRM device handle. + * \param crtc crtc for which timestamp should be returned. 
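
To make the vpos sign convention documented a few lines up concrete, a hypothetical driver implementation of get_scanout_position(); hw_read_scanline() and the fixed 1080p timings are invented for the example:

static int foo_get_scanout_position(struct drm_device *dev, int crtc,
				    int *vpos, int *hpos)
{
	int line = hw_read_scanline(dev, crtc);	/* stand-in register read */
	const int vdisplay = 1080, vtotal = 1125;	/* example timings */

	*hpos = 0;	/* horizontal position not modeled in this sketch */
	if (line < vdisplay) {
		*vpos = line;	/* positive: inside active scanout */
		return DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
	}
	/* Inside vblank: negative count of lines until active scanout,
	 * so line == vtotal - 1 yields -1, matching the contract above. */
	*vpos = line - vtotal;
	return DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_INVBL |
	       DRM_SCANOUTPOS_ACCURATE;
}
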
+ * \param *max_error Maximum allowable timestamp error in nanoseconds. + * Implementation should strive to provide timestamp + * with an error of at most *max_error nanoseconds. + * Returns true upper bound on error for timestamp. + * \param *vblank_time Target location for returned vblank timestamp. + * \param flags 0 = Defaults, no special treatment needed. + * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank + * irq handler. Some drivers need to apply some workarounds + * for gpu-specific vblank irq quirks if flag is set. + * + * \returns + * Zero if timestamping isn't supported in current display mode or a + * negative number on failure. A positive status code on success, + * which describes how the vblank_time timestamp was computed. + */ + int (*get_vblank_timestamp) (struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags); + /* these have to be filled in */ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); @@ -983,6 +1060,8 @@ struct drm_device { wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ + struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ + spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ spinlock_t vbl_lock; atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */ u32 *last_vblank; /* protected by dev->vbl_lock, used */ @@ -1282,11 +1361,22 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); +extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, + struct timeval *vblanktime); extern void drm_handle_vblank(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_cleanup(struct drm_device *dev); +extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, + struct timeval *tvblank, unsigned flags); +extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, + int crtc, int *max_error, + struct timeval *vblank_time, + unsigned flags, + struct drm_crtc *refcrtc); +extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); + /* Modesetting support */ extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); @@ -1337,6 +1427,9 @@ extern void drm_put_dev(struct drm_device *dev); extern int drm_put_minor(struct drm_minor **minor); extern unsigned int drm_debug; +extern unsigned int drm_vblank_offdelay; +extern unsigned int drm_timestamp_precision; + extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern struct dentry *drm_debugfs_root; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 029aa688e787..acd7fade160d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -351,8 +351,14 @@ struct drm_crtc { bool enabled; + /* Requested mode from modesetting. */ struct drm_display_mode mode; + /* Programmed mode in hw, after adjustments for encoders, + * crtc, panel scaling etc. Needed for timestamping etc. 
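
The hwmode field and the framedur_ns/linedur_ns/pixeldur_ns constants introduced just below cache per-mode durations so that drm_calc_vbltimestamp_from_scanoutpos() can turn a scanout position into a time offset. A hedged sketch of the derivation (the merged drm_calc_timestamping_constants() may order the arithmetic and rounding differently):

#include <linux/math64.h>

static void calc_timestamping_constants_sketch(struct drm_crtc *crtc)
{
	const struct drm_display_mode *m = &crtc->hwmode;
	u64 dotclock = (u64)m->clock * 1000;	/* mode clock is in kHz */

	if (!dotclock)
		return;				/* no hardware mode programmed */

	crtc->pixeldur_ns = div64_u64(1000000000ULL, dotclock);
	crtc->linedur_ns  = crtc->pixeldur_ns * m->crtc_htotal;
	crtc->framedur_ns = crtc->linedur_ns  * m->crtc_vtotal;
}
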
+ */ + struct drm_display_mode hwmode; + int x, y; const struct drm_crtc_funcs *funcs; @@ -360,6 +366,9 @@ struct drm_crtc { uint32_t gamma_size; uint16_t *gamma_store; + /* Constants needed for precise vblank and swap timestamping. */ + s64 framedur_ns, linedur_ns, pixeldur_ns; + /* if you are using the helper */ void *helper_private; }; diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 883c1d439899..e6b28a39942f 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -419,6 +419,10 @@ {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0, 0, 0} #define r128_PCI_IDS \ diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index bc5590b1a1ac..e2cfe80f6fca 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -71,16 +71,14 @@ struct drm_nouveau_gpuobj_free { #define NOUVEAU_GETPARAM_PCI_VENDOR 3 #define NOUVEAU_GETPARAM_PCI_DEVICE 4 #define NOUVEAU_GETPARAM_BUS_TYPE 5 -#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 -#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 #define NOUVEAU_GETPARAM_FB_SIZE 8 #define NOUVEAU_GETPARAM_AGP_SIZE 9 -#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 #define NOUVEAU_GETPARAM_CHIPSET_ID 11 #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 #define NOUVEAU_GETPARAM_PTIMER_TIME 14 #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 +#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 struct drm_nouveau_getparam { uint64_t param; uint64_t value; @@ -171,7 +169,6 @@ struct drm_nouveau_gem_pushbuf { }; #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 -#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002 #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 struct drm_nouveau_gem_cpu_prep { uint32_t handle; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index beafc156a535..50852aad260a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -74,6 +74,8 @@ struct ttm_placement { * @is_iomem: is this io memory ? * @size: size in byte * @offset: offset from the base address + * @io_reserved_vm: The VM system has a refcount in @io_reserved_count + * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve * * Structure indicating the bus placement of an object. */ @@ -83,7 +85,8 @@ struct ttm_bus_placement { unsigned long size; unsigned long offset; bool is_iomem; - bool io_reserved; + bool io_reserved_vm; + uint64_t io_reserved_count; }; @@ -154,7 +157,6 @@ struct ttm_tt; * keeps one refcount. When this refcount reaches zero, * the object is destroyed. * @event_queue: Queue for processes waiting on buffer object status change. - * @lock: spinlock protecting mostly synchronization members. * @mem: structure describing current placement. * @persistant_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. 
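
The @io_reserved_count field documented above turns the driver's io_mem_reserve/io_mem_free pair into first-user/last-user calls on the slow path. Distilled from the ttm_mem_io_reserve()/ttm_mem_io_free() hunks earlier (the retry-on-EAGAIN eviction path is omitted here):

/* Both helpers are assumed to run under ttm_mem_io_lock() whenever
 * io_reserve_fastpath is disabled for the memory type. */
static int io_region_get(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0)
		return bdev->driver->io_mem_reserve(bdev, mem);
	return 0;
}

static void io_region_put(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
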
If this behaviour is not desired, this member @@ -213,7 +215,6 @@ struct ttm_buffer_object { struct kref kref; struct kref list_kref; wait_queue_head_t event_queue; - spinlock_t lock; /** * Members protected by the bo::reserved lock. @@ -237,6 +238,7 @@ struct ttm_buffer_object { struct list_head lru; struct list_head ddestroy; struct list_head swap; + struct list_head io_reserve_lru; uint32_t val_seq; bool seq_valid; @@ -248,10 +250,10 @@ struct ttm_buffer_object { atomic_t reserved; /** - * Members protected by the bo::lock + * Members protected by struct buffer_object_device::fence_lock * In addition, setting sync_obj to anything else * than NULL requires bo::reserved to be held. This allows for - * checking NULL while reserved but not holding bo::lock. + * checking NULL while reserved but not holding the mentioned lock. */ void *sync_obj_arg; @@ -364,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, */ extern void ttm_bo_unref(struct ttm_buffer_object **bo); + +/** + * ttm_bo_list_ref_sub + * + * @bo: The buffer object. + * @count: The number of references with which to decrease @bo::list_kref; + * @never_free: The refcount should not reach zero with this operation. + * + * Release @count lru list references to this buffer object. + */ +extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, + bool never_free); + +/** + * ttm_bo_add_to_lru + * + * @bo: The buffer object. + * + * Add this bo to the relevant mem type lru and, if it's backed by + * system pages (ttms) to the swap list. + * This function must be called with struct ttm_bo_global::lru_lock held, and + * is typically called immediately prior to unreserving a bo. + */ +extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); + +/** + * ttm_bo_del_from_lru + * + * @bo: The buffer object. + * + * Remove this bo from all lru lists used to lookup and reserve an object. + * This function must be called with struct ttm_bo_global::lru_lock held, + * and is usually called just immediately after the bo has been reserved to + * avoid recursive reservation from lru lists. + */ +extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); + + /** * ttm_bo_lock_delayed_workqueue * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 8e0c848326b6..1da8af6ac884 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -179,30 +179,6 @@ struct ttm_tt { #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ -/** - * struct ttm_mem_type_manager - * - * @has_type: The memory type has been initialized. - * @use_type: The memory type is enabled. - * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory - * managed by this memory type. - * @gpu_offset: If used, the GPU offset of the first managed page of - * fixed memory or the first managed location in an aperture. - * @size: Size of the managed region. - * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, - * as defined in ttm_placement_common.h - * @default_caching: The default caching policy used for a buffer object - * placed in this memory type if the user doesn't provide one. - * @manager: The range manager used for this memory type. FIXME: If the aperture - * has a page size different from the underlying system, the granularity - * of this manager should take care of this. But the range allocating code - * in ttm_bo.c needs to be modified for this. 
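
The ttm_bo_api.h kerneldoc above spells out the locking for the newly exported lru helpers. The idiom used at every converted call site in ttm_bo.c, restated as a sketch:

/* Take a bo off the lru lists under the global lru lock, then drop the
 * accumulated list references outside the spinlock. */
static void bo_remove_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;

	spin_lock(&glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);
}
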
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848326b6..1da8af6ac884 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@ struct ttm_tt {
 #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
 #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */

-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @manager: The range manager used for this memory type. FIXME: If the aperture
- * has a page size different from the underlying system, the granularity
- * of this manager should take care of this. But the range allocating code
- * in ttm_bo.c needs to be modified for this.
- * @lru: The lru list for this memory type.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
 struct ttm_mem_type_manager;

 struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func {
 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };

+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
 struct ttm_mem_type_manager {
 struct ttm_bo_device *bdev;
@@ -303,6 +309,15 @@ struct ttm_mem_type_manager {
 uint32_t default_caching;
 const struct ttm_mem_type_manager_func *func;
 void *priv;
+ struct mutex io_reserve_mutex;
+ bool use_io_reserve_lru;
+ bool io_reserve_fastpath;
+
+ /*
+ * Protected by @io_reserve_mutex:
+ */
+
+ struct list_head io_reserve_lru;

 /*
 * Protected by the global->lru_lock.
@@ -510,9 +525,12 @@ struct ttm_bo_global {
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
 * @addr_space_mm: Range manager for the device address space.
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
+ * @val_seq: Current validation sequence.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
@@ -531,6 +549,7 @@ struct ttm_bo_device {
 struct ttm_bo_driver *driver;
 rwlock_t vm_lock;
 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+ spinlock_t fence_lock;
 /*
 * Protected by the vm lock.
 */
@@ -541,6 +560,7 @@
 * Protected by the global:lru lock.
 */
 struct list_head ddestroy;
+ uint32_t val_seq;

 /*
 * Protected by load / firstopen / lastclose /unload sync.
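The new per-device fence_lock replaces the per-bo spinlock removed earlier in this patch: every bo's synchronization members are now serialized by one lock on its device. A hedged sketch of the resulting access pattern (assumes the sync_obj_ref()/sync_obj_unref() hooks of struct ttm_bo_driver):

/* Sketch: safely take a reference on a bo's fence under fence_lock. */
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
void *sync_obj = NULL;

spin_lock(&bdev->fence_lock);
if (bo->sync_obj)
	sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);

if (sync_obj) {
	/* ... wait on or inspect the fence outside the spinlock ... */
	driver->sync_obj_unref(&sync_obj);
}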
@@ -753,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset,
- unsigned long *bus_size);
-
-extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
@@ -810,6 +805,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

 /**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+ bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
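Note the locking rule in the new comment: the _locked variant must run under the manager's io_reserve lock. A hedged sketch of a conforming caller, roughly what the unlocked ttm_bo_unmap_virtual() wrapper can be expected to do:

/* Sketch: tear down CPU mappings under the io_reserve lock. */
struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];

ttm_mem_io_lock(man, false);	/* non-interruptible, expected to return 0 */
ttm_bo_unmap_virtual_locked(bo);
ttm_mem_io_unlock(man);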
@@ -859,11 +870,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
 */
 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 bool interruptible,
 bool no_wait, bool use_sequence, uint32_t sequence);

+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, Only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence,
+ uint32_t sequence);
+
 /**
 * ttm_bo_unreserve
 *
@@ -874,6 +918,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

 /**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9ea..26cc7f9ffa41 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
 * @bo: refcounted buffer object pointer.
 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
 * adding a new sync object.
- * @reservied: Indicates whether @bo has been reserved for validation.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
+ * @old_sync_obj: Pointer to a sync object about to be unreferenced
 */

 struct ttm_validate_buffer {
@@ -49,6 +52,9 @@
 struct ttm_buffer_object *bo;
 void *new_sync_obj_arg;
 bool reserved;
+ bool removed;
+ int put_count;
+ void *old_sync_obj;
 };

 /**
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
 * function ttm_eu_reserve_buffers
 *
 * @list: thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
 *
 * Tries to reserve bos pointed to by the list entries for validation.
 * If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
 * has failed.
 */

-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);

 /**
 * function ttm_eu_fence_buffer_objects.
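With val_seq dropped from the signature (the device-global ttm_bo_device::val_seq added above takes over), a caller only builds its thread-private list and makes one call. A hedged sketch of the reserve/submit/fence flow; it assumes the list-head member of struct ttm_validate_buffer is named 'head', that ttm_eu_fence_buffer_objects() takes the list plus a driver sync object, and that 'fence' is such an object:

/* Sketch: reserve a thread-private list of bos, submit work, fence them. */
struct ttm_validate_buffer val_buf;
struct list_head list;
int ret;

INIT_LIST_HEAD(&list);
val_buf.bo = bo;			/* refcounted pointer, per the doc above */
val_buf.new_sync_obj_arg = NULL;
list_add_tail(&val_buf.head, &list);

ret = ttm_eu_reserve_buffers(&list);
if (ret)
	return ret;	/* on error, all reservations have been backed off */

/* ... validate the buffers and submit the command stream ... */

ttm_eu_fence_buffer_objects(&list, fence);	/* fences and unreserves all */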
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 6cc38fc07ab7..d4a62ab2ee5e 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -24,5 +24,7 @@ struct kref {
 void kref_init(struct kref *kref);
 void kref_get(struct kref *kref);
 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+int kref_sub(struct kref *kref, unsigned int count,
+ void (*release) (struct kref *kref));

 #endif /* _KREF_H_ */
diff --git a/lib/kref.c b/lib/kref.c
index d3d227a08a4b..3efb882b11db 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -62,6 +62,36 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
 return 0;
 }

+
+/**
+ * kref_sub - subtract a number of refcounts for object.
+ * @kref: object.
+ * @count: Number of refcounts to subtract.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function.
+ *
+ * Subtract @count from the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0. Beware, if this
+ * function returns 0, you still can not count on the kref remaining in
+ * memory. Only use the return value if you want to see if the kref is now
+ * gone, not present.
+ */
+int kref_sub(struct kref *kref, unsigned int count,
+ void (*release)(struct kref *kref))
+{
+	WARN_ON(release == NULL);
+	WARN_ON(release == (void (*)(struct kref *))kfree);
+
+	if (atomic_sub_and_test((int) count, &kref->refcount)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 EXPORT_SYMBOL(kref_init);
 EXPORT_SYMBOL(kref_get);
 EXPORT_SYMBOL(kref_put);
+EXPORT_SYMBOL(kref_sub);
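kref_sub() generalizes kref_put() for callers that batch several puts into one atomic operation, such as ttm_bo_list_ref_sub() above. A hedged usage sketch (struct my_obj and its release function are hypothetical; note the doc comment forbids passing kfree itself as the release callback, so it is wrapped):

/* Sketch: drop 'count' references in a single atomic operation. */
struct my_obj {
	struct kref kref;
	/* ... payload ... */
};

static void my_obj_release(struct kref *kref)
{
	/* Wrapping kfree in a named release function, as required. */
	kfree(container_of(kref, struct my_obj, kref));
}

static void my_obj_put_many(struct my_obj *obj, unsigned int count)
{
	if (kref_sub(&obj->kref, count, my_obj_release))
		pr_debug("last reference gone, object released\n");
}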