author	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-09-11 14:46:53 +0200
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-09-11 14:46:53 +0200
commit	336879b1da97fffc097f77c6d6f818660f2826f0 (patch)
tree	4ddb4d1c5d2b67fb096c72e41d2a03b01a605041 /drivers/gpu
parent	3d3cbd84300e7be1e53083cac0f6f9c12978ecb4 (diff)
parent	fdcaa1dbb7c6ed419b10fb8cdb5001ab0a00538f (diff)
Merge remote-tracking branch 'airlied/drm-next' into topic/vblank-rework
Dave asked me to do the backmerge before sending him the revised pull
request, so here we go. Nothing fancy in the conflicts, just a few things
changed right next to each other.

Conflicts:
	drivers/gpu/drm/drm_irq.c

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c8
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c1
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c20
-rw-r--r--drivers/gpu/drm/bochs/bochs.h2
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c3
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c20
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c17
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c1
-rw-r--r--drivers/gpu/drm/drm_auth.c6
-rw-r--r--drivers/gpu/drm/drm_bufs.c90
-rw-r--r--drivers/gpu/drm/drm_crtc.c333
-rw-r--r--drivers/gpu/drm/drm_debugfs.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c3
-rw-r--r--drivers/gpu/drm/drm_drv.c21
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c85
-rw-r--r--drivers/gpu/drm/drm_fops.c22
-rw-r--r--drivers/gpu/drm/drm_hashtab.c2
-rw-r--r--drivers/gpu/drm/drm_info.c59
-rw-r--r--drivers/gpu/drm/drm_ioctl.c29
-rw-r--r--drivers/gpu/drm/drm_irq.c84
-rw-r--r--drivers/gpu/drm/drm_legacy.h44
-rw-r--r--drivers/gpu/drm/drm_lock.c37
-rw-r--r--drivers/gpu/drm/drm_memory.c12
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c14
-rw-r--r--drivers/gpu/drm/drm_modes.c1
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c213
-rw-r--r--drivers/gpu/drm/drm_pci.c41
-rw-r--r--drivers/gpu/drm/drm_platform.c38
-rw-r--r--drivers/gpu/drm/drm_prime.c8
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c17
-rw-r--r--drivers/gpu/drm/drm_usb.c88
-rw-r--r--drivers/gpu/drm/drm_vm.c74
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c112
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c13
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c3
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c9
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c1
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c234
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c23
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c191
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h173
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c220
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c155
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c231
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c240
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h29
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c125
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c115
-rw-r--r--drivers/gpu/drm/i915/i915_params.c10
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h243
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c15
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c7
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c171
-rw-r--r--drivers/gpu/drm/i915/intel_display.c799
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c153
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h24
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c67
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h3
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c16
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c38
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c94
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c44
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c1697
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h112
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c563
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c148
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h46
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c98
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c7
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c49
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c21
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c20
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig12
-rw-r--r--drivers/gpu/drm/nouveau/Makefile25
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c176
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ioctl.c531
-rw-r--r--drivers/gpu/drm/nouveau/core/core/notify.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c154
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/acpi.c59
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/acpi.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c396
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/ctrl.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c477
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h131
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c380
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c100
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c160
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c210
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c113
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h65
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c117
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gm107.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv108.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv25.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv30.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv34.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv35.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c271
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve4.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h470
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h51
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ioctl.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/notify.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/perfmon.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/class.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/event.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/unpack.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltc.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c197
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c665
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/base.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c)167
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c)121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h658
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c42
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c54
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c114
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h84
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c120
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c27
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c246
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c262
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c246
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c320
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c325
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h70
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c137
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c513
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c254
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c136
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c183
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c384
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c59
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h3
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c920
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c35
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/class.h558
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c129
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.h62
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvif/event.h62
-rw-r--r--drivers/gpu/drm/nouveau/nvif/ioctl.h128
-rw-r--r--drivers/gpu/drm/nouveau/nvif/list.h353
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c248
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c304
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.h75
l---------drivers/gpu/drm/nouveau/nvif/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/unpack.h24
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c12
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c27
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/panel/Kconfig7
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c21
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c29
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c203
-rw-r--r--drivers/gpu/drm/qxl/Makefile2
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c49
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c35
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h34
-rw-r--r--drivers/gpu/drm/qxl/qxl_fence.c91
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c17
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c19
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c72
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c173
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c101
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c1
-rw-r--r--drivers/gpu/drm/radeon/Makefile6
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/cik.c49
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c6
-rw-r--r--drivers/gpu/drm/radeon/drm_buffer.c (renamed from drivers/gpu/drm/drm_buffer.c)6
-rw-r--r--drivers/gpu/drm/radeon/drm_buffer.h148
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c11
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c8
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r420.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c158
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c6
-rw-r--r--drivers/gpu/drm/radeon/r600d.h42
-rw-r--r--drivers/gpu/drm/radeon/radeon.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c87
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c472
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c119
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c274
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c88
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c212
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c158
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c30
-rw-r--r--drivers/gpu/drm/radeon/rv515.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c19
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c24
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c111
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c1
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c23
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c1
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c1
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c6
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c1
-rw-r--r--drivers/gpu/drm/tegra/dc.c123
-rw-r--r--drivers/gpu/drm/tegra/dc.h5
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c1
-rw-r--r--drivers/gpu/drm/tegra/drm.c208
-rw-r--r--drivers/gpu/drm/tegra/drm.h10
-rw-r--r--drivers/gpu/drm/tegra/dsi.c4
-rw-r--r--drivers/gpu/drm/tegra/fb.c13
-rw-r--r--drivers/gpu/drm/tegra/gem.c7
-rw-r--r--drivers/gpu/drm/tegra/gem.h16
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c1
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1
-rw-r--r--drivers/gpu/drm/tegra/output.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c24
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c280
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c28
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c146
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c13
-rw-r--r--drivers/gpu/drm/udl/Kconfig3
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c102
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/udl/udl_main.c8
-rw-r--r--drivers/gpu/drm/via/via_drv.c1
-rw-r--r--drivers/gpu/drm/via/via_map.c2
-rw-r--r--drivers/gpu/drm/via/via_mm.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c183
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c346
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c43
-rw-r--r--drivers/gpu/host1x/job.c22
-rw-r--r--drivers/gpu/ipu-v3/Makefile3
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c916
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c764
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c741
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c778
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h44
-rw-r--r--drivers/gpu/ipu-v3/ipu-smfc.c156
497 files changed, 24890 insertions, 10335 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b066bb3ca01a..e3b4b0f02b3d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -8,6 +8,7 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
select HDMI
+ select FB_CMDLINE
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
@@ -24,12 +25,6 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_USB
- tristate
- depends on DRM
- depends on USB_SUPPORT && USB_ARCH_HAS_HCD
- select USB
-
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -115,6 +110,7 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
+ select MMU_NOTIFIER
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4a55d59ccd22..9292a761ea6d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
-drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
+drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
@@ -22,8 +22,6 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
-drm-usb-y := drm_usb.o
-
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
@@ -36,7 +34,6 @@ CFLAGS_drm_trace_points.o := -I$(src)
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
-obj-$(CONFIG_DRM_USB) += drm_usb.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3f620e21e06b..9a0cc09e6653 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1064,11 +1064,9 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
if (ret)
return ret;
- base = devm_request_and_ioremap(dev, res);
- if (!base) {
- DRM_ERROR("failed to ioremap register\n");
- return -ENOMEM;
- }
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
if (!dcrtc) {
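
The hunk above swaps devm_request_and_ioremap(), which returned NULL on failure, for devm_ioremap_resource(), which returns an ERR_PTR()-encoded error and logs the failure itself. A minimal sketch of the resulting probe-path idiom (driver and function names are illustrative, not from this patch):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment showing the devm_ioremap_resource() idiom. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Checks res, requests the region, maps it, and prints its own
	 * error message on failure -- no DRM_ERROR/-ENOMEM needed. */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* encoded errno, not NULL */

	return 0;
}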
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e2d5792b140f..f672e6ad8afa 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -308,6 +308,7 @@ static struct drm_driver armada_drm_driver = {
.postclose = NULL,
.lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
+ .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index bb9b642d8485..7496f55611a5 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -539,7 +539,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
int flags)
{
return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
- O_RDWR);
+ O_RDWR, NULL);
}
struct drm_gem_object *
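
The extra NULL is the reservation-object parameter that dma_buf_export() gained in this cycle's fence rework; passing NULL (assumed below) lets the dma-buf core allocate a private reservation object. A hedged fragment under that assumption:

#include <linux/dma-buf.h>
#include <drm/drmP.h>

/* Sketch of a prime-export helper using the 5-argument dma_buf_export().
 * The final argument is the new struct reservation_object pointer;
 * NULL asks the core to allocate one. */
static struct dma_buf *example_prime_export(struct drm_gem_object *obj,
					    const struct dma_buf_ops *ops,
					    int flags)
{
	return dma_buf_export(obj, ops, obj->size, flags, NULL);
}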
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 44074fbcf7ff..9a32d9dfdd26 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -51,7 +51,7 @@ static struct drm_driver driver;
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+static const struct pci_device_id pciidlist[] = {
AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
/* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
@@ -199,6 +199,7 @@ static struct drm_driver driver = {
.load = ast_driver_load,
.unload = ast_driver_unload,
+ .set_busid = drm_pci_set_busid,
.fops = &ast_fops,
.name = DRIVER_NAME,
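
DEFINE_PCI_DEVICE_TABLE() is deprecated, so the table is now spelled out as const struct pci_device_id, as the hunk above does. A short sketch of the open-coded form (the IDs are placeholders, not taken from this patch):

#include <linux/module.h>
#include <linux/pci.h>

/* Open-coded device table; PCI_DEVICE() fills vendor/device and wildcards
 * the subsystem IDs.  0x1a03/0x2000 are placeholder values. */
static const struct pci_device_id example_pciidlist[] = {
	{ PCI_DEVICE(0x1a03, 0x2000) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pciidlist);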
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 957d4fabf1e1..cb91c2acc3cb 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -316,7 +316,7 @@ struct ast_bo {
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
- u32 placements[3];
+ struct ttm_place placements[3];
int pin_count;
};
#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index b8246227bab0..8008ea0bc76c 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -293,18 +293,22 @@ void ast_mm_fini(struct ast_private *ast)
void ast_ttm_placement(struct ast_bo *bo, int domain)
{
u32 c = 0;
- bo->placement.fpfn = 0;
- bo->placement.lpfn = 0;
+ unsigned i;
+
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!c)
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placement.num_placement = c;
bo->placement.num_busy_placement = c;
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
}
int ast_bo_create(struct drm_device *dev, int size, int align,
@@ -360,7 +364,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -383,7 +387,7 @@ int ast_bo_unpin(struct ast_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -407,7 +411,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
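
The ast, bochs and cirrus hunks all track the same TTM API change: a buffer's placements array becomes struct ttm_place, so the pfn range moves from the shared ttm_placement into each entry. A compact sketch of the new pattern, assuming ttm_place carries fpfn, lpfn and flags as in this series:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

struct example_bo {
	struct ttm_placement placement;
	struct ttm_place placements[3];	/* per-placement range + flags */
};

static void example_ttm_placement(struct example_bo *bo, int domain)
{
	unsigned i;
	u32 c = 0;

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;

	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++].flags = TTM_PL_FLAG_WC |
					    TTM_PL_FLAG_UNCACHED |
					    TTM_PL_FLAG_VRAM;
	if (!c)		/* fall back to cacheable system memory */
		bo->placements[c++].flags = TTM_PL_MASK_CACHING |
					    TTM_PL_FLAG_SYSTEM;

	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;

	/* fpfn/lpfn are now per placement; 0/0 means "no restriction". */
	for (i = 0; i < c; ++i) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;
	}
}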
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 7eb52dd44b01..4f6e7b3a3635 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -99,7 +99,7 @@ struct bochs_bo {
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
- u32 placements[3];
+ struct ttm_place placements[3];
int pin_count;
};
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index f5e0ead974a6..98837bde2d25 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -82,6 +82,7 @@ static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = bochs_load,
.unload = bochs_unload,
+ .set_busid = drm_pci_set_busid,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
@@ -177,7 +178,7 @@ static void bochs_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = {
+static const struct pci_device_id bochs_pci_tbl[] = {
{
.vendor = 0x1234,
.device = 0x1111,
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 1728a1b0b813..2af30e7607d7 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -257,20 +257,26 @@ void bochs_mm_fini(struct bochs_device *bochs)
static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
{
+ unsigned i;
u32 c = 0;
- bo->placement.fpfn = 0;
- bo->placement.lpfn = 0;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
if (domain & TTM_PL_FLAG_VRAM) {
- bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
+ bo->placements[c++].flags = TTM_PL_FLAG_WC
+ | TTM_PL_FLAG_UNCACHED
| TTM_PL_FLAG_VRAM;
}
if (domain & TTM_PL_FLAG_SYSTEM) {
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING
+ | TTM_PL_FLAG_SYSTEM;
}
if (!c) {
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING
+ | TTM_PL_FLAG_SYSTEM;
+ }
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
}
bo->placement.num_placement = c;
bo->placement.num_busy_placement = c;
@@ -294,7 +300,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
bochs_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -319,7 +325,7 @@ int bochs_bo_unpin(struct bochs_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 4516b052cc67..e705335101a5 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -29,7 +29,7 @@ module_param_named(modeset, cirrus_modeset, int, 0400);
static struct drm_driver driver;
/* only bind to the cirrus chip in qemu */
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+static const struct pci_device_id pciidlist[] = {
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
0, 0 },
{0,}
@@ -128,6 +128,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = cirrus_driver_load,
.unload = cirrus_driver_unload,
+ .set_busid = drm_pci_set_busid,
.fops = &cirrus_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 401c890b6c6a..dd2cfc9024aa 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -163,7 +163,7 @@ struct cirrus_bo {
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
- u32 placements[3];
+ struct ttm_place placements[3];
int pin_count;
};
#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 92e6b7786097..3e7d758330a9 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -298,18 +298,21 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
{
u32 c = 0;
- bo->placement.fpfn = 0;
- bo->placement.lpfn = 0;
+ unsigned i;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!c)
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placement.num_placement = c;
bo->placement.num_busy_placement = c;
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
}
int cirrus_bo_create(struct drm_device *dev, int size, int align,
@@ -365,7 +368,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -392,7 +395,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index dde205cef384..4b2b4aa5033b 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include "drm_legacy.h"
#if __OS_HAS_AGP
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 3cedae12b3c1..708a2044c631 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -35,6 +35,12 @@
#include <drm/drmP.h>
+struct drm_magic_entry {
+ struct list_head head;
+ struct drm_hash_item hash_item;
+ struct drm_file *priv;
+};
+
/**
* Find the file with the given magic number.
*
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 61acb8f6756d..9e04d6a43fa4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1,18 +1,13 @@
-/**
- * \file drm_bufs.c
- * Generic buffer template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Legacy: Generic DRM Buffer Management
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -39,6 +34,7 @@
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
+#include "drm_legacy.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
@@ -365,9 +361,9 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return 0;
}
-int drm_addmap(struct drm_device * dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
struct drm_map_list *list;
int rc;
@@ -377,8 +373,7 @@ int drm_addmap(struct drm_device * dev, resource_size_t offset,
*map_ptr = list->map;
return rc;
}
-
-EXPORT_SYMBOL(drm_addmap);
+EXPORT_SYMBOL(drm_legacy_addmap);
/**
* Ioctl to specify a range of memory that is available for mapping by a
@@ -391,8 +386,8 @@ EXPORT_SYMBOL(drm_addmap);
* \return zero on success or a negative value on error.
*
*/
-int drm_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_map *map = data;
struct drm_map_list *maplist;
@@ -429,9 +424,9 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
it's being used, and frees any associated resources (such as MTRRs) if it's not
in use.
*
- * \sa drm_addmap
+ * \sa drm_legacy_addmap
*/
-int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
+int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
@@ -485,19 +480,19 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
return 0;
}
-EXPORT_SYMBOL(drm_rmmap_locked);
+EXPORT_SYMBOL(drm_legacy_rmmap_locked);
-int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_rmmap_locked(dev, map);
+ ret = drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
return ret;
}
-EXPORT_SYMBOL(drm_rmmap);
+EXPORT_SYMBOL(drm_legacy_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
@@ -514,8 +509,8 @@ EXPORT_SYMBOL(drm_rmmap);
* \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*/
-int drm_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_map *request = data;
struct drm_local_map *map = NULL;
@@ -546,7 +541,7 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
- ret = drm_rmmap_locked(dev, map);
+ ret = drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
@@ -599,7 +594,8 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
* reallocates the buffer list of the same size order to accommodate the new
* buffers.
*/
-int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+int drm_legacy_addbufs_agp(struct drm_device *dev,
+ struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
@@ -759,10 +755,11 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return 0;
}
-EXPORT_SYMBOL(drm_addbufs_agp);
+EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* __OS_HAS_AGP */
-int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+int drm_legacy_addbufs_pci(struct drm_device *dev,
+ struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
int count;
@@ -964,9 +961,10 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return 0;
}
-EXPORT_SYMBOL(drm_addbufs_pci);
+EXPORT_SYMBOL(drm_legacy_addbufs_pci);
-static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+static int drm_legacy_addbufs_sg(struct drm_device *dev,
+ struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
@@ -1135,8 +1133,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
* addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
* PCI memory respectively.
*/
-int drm_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_addbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_buf_desc *request = data;
int ret;
@@ -1149,15 +1147,15 @@ int drm_addbufs(struct drm_device *dev, void *data,
#if __OS_HAS_AGP
if (request->flags & _DRM_AGP_BUFFER)
- ret = drm_addbufs_agp(dev, request);
+ ret = drm_legacy_addbufs_agp(dev, request);
else
#endif
if (request->flags & _DRM_SG_BUFFER)
- ret = drm_addbufs_sg(dev, request);
+ ret = drm_legacy_addbufs_sg(dev, request);
else if (request->flags & _DRM_FB_BUFFER)
ret = -EINVAL;
else
- ret = drm_addbufs_pci(dev, request);
+ ret = drm_legacy_addbufs_pci(dev, request);
return ret;
}
@@ -1179,8 +1177,8 @@ int drm_addbufs(struct drm_device *dev, void *data,
lock, preventing allocation of more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
-int drm_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_infobufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_info *request = data;
@@ -1260,8 +1258,8 @@ int drm_infobufs(struct drm_device *dev, void *data,
*
* \note This ioctl is deprecated and mostly never used.
*/
-int drm_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_markbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_desc *request = data;
@@ -1307,8 +1305,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
* Calls free_buffer() for each used buffer.
* This function is primarily used for debugging.
*/
-int drm_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_freebufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_free *request = data;
@@ -1360,8 +1358,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
* offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
-int drm_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_mapbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
@@ -1448,7 +1446,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
return retcode;
}
-int drm_dma_ioctl(struct drm_device *dev, void *data,
+int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -1460,7 +1458,7 @@ int drm_dma_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
+struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
struct drm_map_list *entry;
@@ -1472,4 +1470,4 @@ struct drm_local_map *drm_getsarea(struct drm_device *dev)
}
return NULL;
}
-EXPORT_SYMBOL(drm_getsarea);
+EXPORT_SYMBOL(drm_legacy_getsarea);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ca8bb1bc92a0..7d7c1fd15443 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -45,101 +45,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv);
-/**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
- *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
- */
-void drm_modeset_lock_all(struct drm_device *dev)
-{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_modeset_acquire_ctx *ctx;
- int ret;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (WARN_ON(!ctx))
- return;
-
- mutex_lock(&config->mutex);
-
- drm_modeset_acquire_init(ctx, 0);
-
-retry:
- ret = drm_modeset_lock(&config->connection_mutex, ctx);
- if (ret)
- goto fail;
- ret = drm_modeset_lock_all_crtcs(dev, ctx);
- if (ret)
- goto fail;
-
- WARN_ON(config->acquire_ctx);
-
- /* now we hold the locks, so now that it is safe, stash the
- * ctx for drm_modeset_unlock_all():
- */
- config->acquire_ctx = ctx;
-
- drm_warn_on_modeset_not_all_locked(dev);
-
- return;
-
-fail:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(ctx);
- goto retry;
- }
-}
-EXPORT_SYMBOL(drm_modeset_lock_all);
-
-/**
- * drm_modeset_unlock_all - drop all modeset locks
- * @dev: device
- *
- * This function drop all modeset locks taken by drm_modeset_lock_all.
- */
-void drm_modeset_unlock_all(struct drm_device *dev)
-{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
-
- if (WARN_ON(!ctx))
- return;
-
- config->acquire_ctx = NULL;
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
-
- kfree(ctx);
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_modeset_unlock_all);
-
-/**
- * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
- * @dev: device
- *
- * Useful as a debug assert.
- */
-void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
-
- /* Locking is currently fubar in the panic handler. */
- if (oops_in_progress)
- return;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-}
-EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
-
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
const char *fnname(int val) \
@@ -515,9 +420,6 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
if (ret)
goto out;
- /* Grab the idr reference. */
- drm_framebuffer_reference(fb);
-
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
out:
@@ -527,10 +429,34 @@ out:
}
EXPORT_SYMBOL(drm_framebuffer_init);
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ fb->base.id = 0;
+}
+
static void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
container_of(kref, struct drm_framebuffer, refcount);
+ struct drm_device *dev = fb->dev;
+
+ /*
+ * The lookup idr holds a weak reference, which has not necessarily been
+ * removed at this point. Check for that.
+ */
+ mutex_lock(&dev->mode_config.fb_lock);
+ if (fb->base.id) {
+ /* Mark fb as reaped and drop idr ref. */
+ __drm_framebuffer_unregister(dev, fb);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
fb->funcs->destroy(fb);
}
@@ -567,8 +493,10 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
mutex_lock(&dev->mode_config.fb_lock);
fb = __drm_framebuffer_lookup(dev, id);
- if (fb)
- drm_framebuffer_reference(fb);
+ if (fb) {
+ if (!kref_get_unless_zero(&fb->refcount))
+ fb = NULL;
+ }
mutex_unlock(&dev->mode_config.fb_lock);
return fb;
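
drm_framebuffer_lookup() now treats the idr entry as a weak reference: kref_get_unless_zero() refuses to resurrect a framebuffer whose refcount has already dropped to zero, closing the race against the free path above. The generic look-up-under-lock idiom, sketched with illustrative names:

#include <linux/kref.h>
#include <linux/mutex.h>

struct example_obj {
	struct kref refcount;
};

static DEFINE_MUTEX(example_table_lock);

/* Upgrade a weak (table-held) pointer into a strong reference.  If the
 * refcount already hit zero the release path is running, so the lookup
 * must return NULL instead of handing out a dying object. */
static struct example_obj *example_lookup_strong(struct example_obj *weak)
{
	struct example_obj *obj = NULL;

	mutex_lock(&example_table_lock);
	if (weak && kref_get_unless_zero(&weak->refcount))
		obj = weak;
	mutex_unlock(&example_table_lock);

	return obj;
}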
@@ -612,19 +540,6 @@ static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
-/* dev->mode_config.fb_lock must be held! */
-static void __drm_framebuffer_unregister(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- fb->base.id = 0;
-
- __drm_framebuffer_unreference(fb);
-}
-
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
@@ -853,6 +768,59 @@ static void drm_mode_remove(struct drm_connector *connector,
}
/**
+ * drm_connector_get_cmdline_mode - reads the user's cmdline mode
+ * @connector: connector to query
+ * @mode: returned mode
+ *
+ * The kernel supports per-connector configuration of its consoles through
+ * use of the video= parameter. This function parses that option and
+ * extracts the user's specified mode (or enable/disable status) for a
+ * particular connector. This is typically only used during the early fbdev
+ * setup.
+ */
+static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
+{
+ struct drm_cmdline_mode *mode = &connector->cmdline_mode;
+ char *option = NULL;
+
+ if (fb_get_options(connector->name, &option))
+ return;
+
+ if (!drm_mode_parse_command_line_for_connector(option,
+ connector,
+ mode))
+ return;
+
+ if (mode->force) {
+ const char *s;
+
+ switch (mode->force) {
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
+ default:
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
+ }
+
+ DRM_INFO("forcing %s connector %s\n", connector->name, s);
+ connector->force = mode->force;
+ }
+
+ DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+ connector->name,
+ mode->xres, mode->yres,
+ mode->refresh_specified ? mode->refresh : 60,
+ mode->rb ? " reduced blanking" : "",
+ mode->margins ? " with margins" : "",
+ mode->interlace ? " interlaced" : "");
+}
+
+/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
@@ -904,6 +872,8 @@ int drm_connector_init(struct drm_device *dev,
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
+ drm_connector_get_cmdline_mode(connector);
+
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
@@ -957,6 +927,29 @@ void drm_connector_cleanup(struct drm_connector *connector)
EXPORT_SYMBOL(drm_connector_cleanup);
/**
+ * drm_connector_index - find the index of a registered connector
+ * @connector: connector to find index for
+ *
+ * Given a registered connector, return the index of that connector within a DRM
+ * device's list of connectors.
+ */
+unsigned int drm_connector_index(struct drm_connector *connector)
+{
+ unsigned int index = 0;
+ struct drm_connector *tmp;
+
+ list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+ if (tmp == connector)
+ return index;
+
+ index++;
+ }
+
+ BUG();
+}
+EXPORT_SYMBOL(drm_connector_index);
+
+/**
* drm_connector_register - register a connector
* @connector: the connector to register
*
@@ -1261,6 +1254,29 @@ void drm_plane_cleanup(struct drm_plane *plane)
EXPORT_SYMBOL(drm_plane_cleanup);
/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+unsigned int drm_plane_index(struct drm_plane *plane)
+{
+ unsigned int index = 0;
+ struct drm_plane *tmp;
+
+ list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+ if (tmp == plane)
+ return index;
+
+ index++;
+ }
+
+ BUG();
+}
+EXPORT_SYMBOL(drm_plane_index);
+
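As with connectors, the index is stable while the plane list is unchanged, so it can back cheap bitmask bookkeeping; a sketch (the pending-flip mask is an invented example):

	/* Hypothetical: one pending-flip bit per plane. */
	static void example_mark_flip_pending(u32 *pending_flips,
					      struct drm_plane *plane)
	{
		*pending_flips |= 1u << drm_plane_index(plane);
	}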
+/**
* drm_plane_force_disable - Forcibly disable a plane
* @plane: plane to disable
*
@@ -1271,19 +1287,21 @@ EXPORT_SYMBOL(drm_plane_cleanup);
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
- struct drm_framebuffer *old_fb = plane->fb;
int ret;
- if (!old_fb)
+ if (!plane->fb)
return;
+ plane->old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
if (ret) {
DRM_ERROR("failed to disable plane with busy fb\n");
+ plane->old_fb = NULL;
return;
}
/* disconnect the plane from the fb and crtc: */
- __drm_framebuffer_unreference(old_fb);
+ __drm_framebuffer_unreference(plane->old_fb);
+ plane->old_fb = NULL;
plane->fb = NULL;
plane->crtc = NULL;
}
@@ -2259,23 +2277,21 @@ static int setplane_internal(struct drm_plane *plane,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
- struct drm_framebuffer *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
+ drm_modeset_lock_all(dev);
/* No fb means shut it down */
if (!fb) {
- drm_modeset_lock_all(dev);
- old_fb = plane->fb;
+ plane->old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
if (!ret) {
plane->crtc = NULL;
plane->fb = NULL;
} else {
- old_fb = NULL;
+ plane->old_fb = NULL;
}
- drm_modeset_unlock_all(dev);
goto out;
}
@@ -2315,8 +2331,7 @@ static int setplane_internal(struct drm_plane *plane,
goto out;
}
- drm_modeset_lock_all(dev);
- old_fb = plane->fb;
+ plane->old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
@@ -2325,15 +2340,16 @@ static int setplane_internal(struct drm_plane *plane,
plane->fb = fb;
fb = NULL;
} else {
- old_fb = NULL;
+ plane->old_fb = NULL;
}
- drm_modeset_unlock_all(dev);
out:
if (fb)
drm_framebuffer_unreference(fb);
- if (old_fb)
- drm_framebuffer_unreference(old_fb);
+ if (plane->old_fb)
+ drm_framebuffer_unreference(plane->old_fb);
+ plane->old_fb = NULL;
+ drm_modeset_unlock_all(dev);
return ret;
@@ -2440,7 +2456,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
* crtcs. Atomic modeset will have saner semantics ...
*/
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
- tmp->old_fb = tmp->primary->fb;
+ tmp->primary->old_fb = tmp->primary->fb;
fb = set->fb;
@@ -2453,8 +2469,9 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
if (tmp->primary->fb)
drm_framebuffer_reference(tmp->primary->fb);
- if (tmp->old_fb)
- drm_framebuffer_unreference(tmp->old_fb);
+ if (tmp->primary->old_fb)
+ drm_framebuffer_unreference(tmp->primary->old_fb);
+ tmp->primary->old_fb = NULL;
}
return ret;
@@ -2785,7 +2802,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
if (crtc->cursor)
return drm_mode_cursor_universal(crtc, req, file_priv);
- drm_modeset_lock(&crtc->mutex, NULL);
+ drm_modeset_lock_crtc(crtc);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
@@ -2809,7 +2826,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
}
}
out:
- drm_modeset_unlock(&crtc->mutex);
+ drm_modeset_unlock_crtc(crtc);
return ret;
@@ -3244,7 +3261,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
- if (drm_is_master(file_priv) || capable(CAP_SYS_ADMIN) ||
+ if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
drm_is_control_client(file_priv)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
@@ -3495,9 +3512,10 @@ EXPORT_SYMBOL(drm_property_create_enum);
* @flags: flags specifying the property type
* @name: name of the property
* @props: enumeration lists with property bitflags
- * @num_values: number of pre-defined values
+ * @num_props: size of the @props array
+ * @supported_bits: bitmask of all supported enumeration values
*
- * This creates a new generic drm property which can then be attached to a drm
+ * This creates a new bitmask drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
@@ -4157,12 +4175,25 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
- struct drm_property *property,
- uint64_t value)
+/**
+ * drm_mode_plane_set_obj_prop - set the value of a property
+ * @plane: drm plane object to set property value for
+ * @property: property to set
+ * @value: value the property should be set to
+ *
+ * This function sets a given property on a given plane object. It
+ * calls the driver's ->set_property callback and changes the software state of
+ * the property if the callback succeeds.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value)
{
int ret = -EINVAL;
- struct drm_plane *plane = obj_to_plane(obj);
+ struct drm_mode_object *obj = &plane->base;
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
@@ -4171,6 +4202,7 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
+EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
/**
* drm_mode_getproperty_ioctl - get the current value of a object's property
@@ -4309,7 +4341,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
break;
case DRM_MODE_OBJECT_PLANE:
- ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
+ property, arg->value);
break;
}
@@ -4529,7 +4562,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
{
struct drm_mode_crtc_page_flip *page_flip = data;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ struct drm_framebuffer *fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int ret = -EINVAL;
@@ -4545,7 +4578,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (!crtc)
return -ENOENT;
- drm_modeset_lock(&crtc->mutex, NULL);
+ drm_modeset_lock_crtc(crtc);
if (crtc->primary->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@@ -4601,7 +4634,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
(void (*) (struct drm_pending_event *)) kfree;
}
- old_fb = crtc->primary->fb;
+ crtc->primary->old_fb = crtc->primary->fb;
ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -4611,7 +4644,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
kfree(e);
}
/* Keep the old fb, don't unref it. */
- old_fb = NULL;
+ crtc->primary->old_fb = NULL;
} else {
/*
* Warn if the driver hasn't properly updated the crtc->fb
@@ -4627,9 +4660,10 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
out:
if (fb)
drm_framebuffer_unreference(fb);
- if (old_fb)
- drm_framebuffer_unreference(old_fb);
- drm_modeset_unlock(&crtc->mutex);
+ if (crtc->primary->old_fb)
+ drm_framebuffer_unreference(crtc->primary->old_fb);
+ crtc->primary->old_fb = NULL;
+ drm_modeset_unlock_crtc(crtc);
return ret;
}
@@ -4645,9 +4679,14 @@ out:
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 13bd42923dd4..4491dbda653e 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -49,9 +49,7 @@ static const struct drm_info_list drm_debugfs_list[] = {
{"clients", drm_clients_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
-#endif
};
#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ac3c2738db94..b3adf1445020 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1772,7 +1772,7 @@ static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
case DP_LINK_BW_5_4:
return 10 * dp_link_count;
}
- return 0;
+ BUG();
}
/**
@@ -2071,6 +2071,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
* drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
* @mgr: manager to notify irq for.
* @esi: 4 bytes from SINK_COUNT_ESI
+ * @handled: whether the hpd interrupt was consumed or not
*
* This should be called from the driver when it detects a short IRQ,
* along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index db03e16ca817..970613c5a1eb 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -133,7 +133,6 @@ EXPORT_SYMBOL(drm_master_get);
static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp;
@@ -143,7 +142,7 @@ static void drm_master_destroy(struct kref *kref)
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
+ drm_legacy_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
@@ -154,12 +153,6 @@ static void drm_master_destroy(struct kref *kref)
master->unique_len = 0;
}
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
drm_ht_remove(&master->magiclist);
mutex_unlock(&dev->struct_mutex);
@@ -179,7 +172,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
int ret = 0;
mutex_lock(&dev->master_mutex);
- if (drm_is_master(file_priv))
+ if (file_priv->is_master)
goto out_unlock;
if (file_priv->minor->master) {
@@ -193,10 +186,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
}
file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
drm_master_put(&file_priv->minor->master);
+ }
}
out_unlock:
@@ -210,7 +206,7 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
int ret = -EINVAL;
mutex_lock(&dev->master_mutex);
- if (!drm_is_master(file_priv))
+ if (!file_priv->is_master)
goto out_unlock;
if (!file_priv->minor->master)
@@ -220,6 +216,7 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
+ file_priv->is_master = 0;
out_unlock:
mutex_unlock(&dev->master_mutex);
@@ -775,7 +772,7 @@ void drm_dev_unregister(struct drm_device *dev)
drm_vblank_cleanup(dev);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
+ drm_legacy_rmmap(dev, r_list->map);
drm_minor_unregister(dev, DRM_MINOR_LEGACY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1dbf3bc4c6a3..f905c63c0f68 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3433,10 +3433,10 @@ EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
/**
* drm_assign_hdmi_deep_color_info - detect whether monitor supports
* hdmi deep color modes and update drm_display_info if so.
- *
* @edid: monitor EDID information
* @info: Updated with maximum supported deep color bpc and color format
* if deep color supported.
+ * @connector: DRM connector, used only for debug output
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI deep color supported, false if not or unknown.
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3144db9dc0f1..0c0c39bac23d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -126,7 +126,7 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
- temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
+ temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
if (!temp)
return -ENOMEM;
@@ -171,60 +171,6 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
-static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
-{
- struct drm_fb_helper_connector *fb_helper_conn;
- int i;
-
- for (i = 0; i < fb_helper->connector_count; i++) {
- struct drm_cmdline_mode *mode;
- struct drm_connector *connector;
- char *option = NULL;
-
- fb_helper_conn = fb_helper->connector_info[i];
- connector = fb_helper_conn->connector;
- mode = &fb_helper_conn->cmdline_mode;
-
- /* do something on return - turn off connector maybe */
- if (fb_get_options(connector->name, &option))
- continue;
-
- if (drm_mode_parse_command_line_for_connector(option,
- connector,
- mode)) {
- if (mode->force) {
- const char *s;
- switch (mode->force) {
- case DRM_FORCE_OFF:
- s = "OFF";
- break;
- case DRM_FORCE_ON_DIGITAL:
- s = "ON - dig";
- break;
- default:
- case DRM_FORCE_ON:
- s = "ON";
- break;
- }
-
- DRM_INFO("forcing %s connector %s\n",
- connector->name, s);
- connector->force = mode->force;
- }
-
- DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
- connector->name,
- mode->xres, mode->yres,
- mode->refresh_specified ? mode->refresh : 60,
- mode->rb ? " reduced blanking" : "",
- mode->margins ? " with margins" : "",
- mode->interlace ? " interlaced" : "");
- }
-
- }
- return 0;
-}
-
static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
{
uint16_t *r_base, *g_base, *b_base;
@@ -345,10 +291,17 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
- list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
+ if (dev->mode_config.rotation_property) {
+ drm_mode_plane_set_obj_prop(plane,
+ dev->mode_config.rotation_property,
+ BIT(DRM_ROTATE_0));
+ }
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
struct drm_crtc *crtc = mode_set->crtc;
@@ -419,11 +372,11 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
- /* NOTE: we use lockless flag below to avoid grabbing other
- * modeset locks. So just trylock the underlying mutex
- * directly:
+ /*
+ * NOTE: Use trylock mode to avoid deadlocks and sleeping in
+ * panic context.
*/
- if (!mutex_trylock(&dev->mode_config.mutex)) {
+ if (__drm_modeset_lock_all(dev, true) != 0) {
error = true;
continue;
}
@@ -432,7 +385,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (ret)
error = true;
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
return error;
}
@@ -1013,7 +966,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
- cmdline_mode = &fb_helper_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
@@ -1260,9 +1213,7 @@ EXPORT_SYMBOL(drm_has_preferred_mode);
static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
{
- struct drm_cmdline_mode *cmdline_mode;
- cmdline_mode = &fb_connector->cmdline_mode;
- return cmdline_mode->specified;
+ return fb_connector->connector->cmdline_mode.specified;
}
struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
@@ -1272,7 +1223,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
struct drm_display_mode *mode = NULL;
bool prefer_non_interlace;
- cmdline_mode = &fb_helper_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
@@ -1657,8 +1608,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
struct drm_device *dev = fb_helper->dev;
int count = 0;
- drm_fb_helper_parse_command_line(fb_helper);
-
mutex_lock(&dev->mode_config.mutex);
count = drm_fb_helper_probe_connector_modes(fb_helper,
dev->mode_config.max_width,
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4b060942cb3c..b419990042b0 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -194,6 +194,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
goto out_close;
}
+ priv->is_master = 1;
/* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master);
priv->authenticated = 1;
@@ -267,11 +268,11 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
- if (drm_i_have_hw_lock(dev, file_priv)) {
+ if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ drm_legacy_lock_free(&file_priv->master->lock,
+ _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
@@ -329,8 +330,6 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
*/
int drm_lastclose(struct drm_device * dev)
{
- struct drm_vma_entry *vma, *vma_temp;
-
DRM_DEBUG("\n");
if (dev->driver->lastclose)
@@ -345,13 +344,7 @@ int drm_lastclose(struct drm_device * dev)
drm_agp_clear(dev);
drm_legacy_sg_cleanup(dev);
-
- /* Clear vma list (only built for debugging) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-
+ drm_legacy_vma_flush(dev);
drm_legacy_dma_takedown(dev);
mutex_unlock(&dev->struct_mutex);
@@ -425,7 +418,7 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->master_mutex);
- if (drm_is_master(file_priv)) {
+ if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
/**
@@ -453,6 +446,7 @@ int drm_release(struct inode *inode, struct file *filp)
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
+ file_priv->is_master = 0;
mutex_unlock(&dev->master_mutex);
if (dev->driver->postclose)
@@ -462,6 +456,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
+ WARN_ON(!list_empty(&file_priv->event_list));
+
put_pid(file_priv->pid);
kfree(file_priv);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7e4bae760e27..c3b80fd65d62 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -125,7 +125,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = &entry->head;
}
if (parent) {
- hlist_add_after_rcu(parent, &item->head);
+ hlist_add_behind_rcu(&item->head, parent);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index ecaf0fa2eec8..3c99f6f60818 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -223,62 +223,3 @@ int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
-
-#if DRM_DEBUG_CODE
-
-int drm_vma_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_vma_entry *pt;
- struct vm_area_struct *vma;
- unsigned long vma_count = 0;
-#if defined(__i386__)
- unsigned int pgprot;
-#endif
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(pt, &dev->vmalist, head)
- vma_count++;
-
- seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
- vma_count, high_memory,
- (void *)(unsigned long)virt_to_phys(high_memory));
-
- list_for_each_entry(pt, &dev->vmalist, head) {
- vma = pt->vma;
- if (!vma)
- continue;
- seq_printf(m,
- "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
- pt->pid,
- (void *)vma->vm_start, (void *)vma->vm_end,
- vma->vm_flags & VM_READ ? 'r' : '-',
- vma->vm_flags & VM_WRITE ? 'w' : '-',
- vma->vm_flags & VM_EXEC ? 'x' : '-',
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
- vma->vm_pgoff);
-
-#if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
- seq_printf(m, " %c%c%c%c%c%c%c%c%c",
- pgprot & _PAGE_PRESENT ? 'p' : '-',
- pgprot & _PAGE_RW ? 'w' : 'r',
- pgprot & _PAGE_USER ? 'u' : 's',
- pgprot & _PAGE_PWT ? 't' : 'b',
- pgprot & _PAGE_PCD ? 'u' : 'c',
- pgprot & _PAGE_ACCESSED ? 'a' : '-',
- pgprot & _PAGE_DIRTY ? 'd' : '-',
- pgprot & _PAGE_PSE ? 'm' : 'k',
- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
- seq_printf(m, "\n");
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-#endif
-
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index d3d1a8c72e98..187dfaaeb491 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -62,8 +62,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
@@ -82,17 +82,17 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -189,7 +189,6 @@ drm_unset_busid(struct drm_device *dev,
kfree(master->unique);
master->unique = NULL;
master->unique_len = 0;
- master->unique_size = 0;
}
/**
@@ -245,15 +244,15 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
if (master->unique != NULL)
drm_unset_busid(dev, master);
- if (dev->driver->bus && dev->driver->bus->set_busid) {
- ret = dev->driver->bus->set_busid(dev, master);
+ if (dev->driver->set_busid) {
+ ret = dev->driver->set_busid(dev, master);
if (ret) {
drm_unset_busid(dev, master);
return ret;
}
} else {
if (WARN(dev->unique == NULL,
- "No drm_bus.set_busid() implementation provided by "
+ "No drm_driver.set_busid() implementation provided by "
"%ps. Use drm_dev_set_unique() to set the unique "
"name explicitly.", dev->driver))
return -EINVAL;
@@ -607,7 +606,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return -EACCES;
/* MASTER is only for master or control clients */
- if (unlikely((flags & DRM_MASTER) && !drm_is_master(file_priv) &&
+ if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
!drm_is_control_client(file_priv)))
return -EACCES;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 79836594030c..034297640b48 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -639,8 +639,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
const struct drm_crtc *refcrtc,
const struct drm_display_mode *mode)
{
- ktime_t stime, etime, mono_time_offset;
struct timeval tv_etime;
+ ktime_t stime, etime;
int vbl_status;
int vpos, hpos, i;
int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
@@ -685,13 +685,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
&hpos, &stime, &etime);
- /*
- * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
- * CLOCK_REALTIME is requested.
- */
- if (!drm_timestamp_monotonic)
- mono_time_offset = ktime_get_monotonic_offset();
-
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -730,7 +723,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
if (!drm_timestamp_monotonic)
- etime = ktime_sub(etime, mono_time_offset);
+ etime = ktime_mono_to_real(etime);
/* save this only for debugging purposes */
tv_etime = ktime_to_timeval(etime);
@@ -761,10 +754,7 @@ static struct timeval get_drm_timestamp(void)
{
ktime_t now;
- now = ktime_get();
- if (!drm_timestamp_monotonic)
- now = ktime_sub(now, ktime_get_monotonic_offset());
-
+ now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
return ktime_to_timeval(now);
}
@@ -829,6 +819,8 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return 0;
return atomic_read(&vblank->count);
}
EXPORT_SYMBOL(drm_vblank_count);
@@ -852,6 +844,9 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
u32 cur_vblank;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return 0;
+
/* Read timestamp from slot of _vblank_time ringbuffer
* that corresponds to current vblank count. Retry if
* count has incremented during readout. This works like
@@ -965,6 +960,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
unsigned long irqflags;
int ret = 0;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return -EINVAL;
+
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &vblank->refcount) == 1) {
@@ -1015,6 +1013,9 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
BUG_ON(atomic_read(&vblank->refcount) == 0);
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&vblank->refcount)) {
if (drm_vblank_offdelay == 0)
@@ -1044,6 +1045,50 @@ void drm_crtc_vblank_put(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_put);
/**
+ * drm_wait_one_vblank - wait for one vblank
+ * @dev: DRM device
+ * @crtc: crtc index
+ *
+ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+{
+ int ret;
+ u32 last;
+
+ ret = drm_vblank_get(dev, crtc);
+ if (WARN_ON(ret))
+ return;
+
+ last = drm_vblank_count(dev, crtc);
+
+ ret = wait_event_timeout(dev->vblank[crtc].queue,
+ last != drm_vblank_count(dev, crtc),
+ msecs_to_jiffies(100));
+
+ WARN_ON(ret == 0);
+
+ drm_vblank_put(dev, crtc);
+}
+EXPORT_SYMBOL(drm_wait_one_vblank);
+
+/**
+ * drm_crtc_wait_one_vblank - wait for one vblank
+ * @crtc: DRM crtc
+ *
+ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
+{
+ drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
+
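A typical caller would use this to flush a double-buffered register update that the hardware latches at vblank; roughly (the register-write helper is assumed, not a real function):

	/* Hypothetical: write a latched register, then wait for it to apply. */
	static void example_latch_and_wait(struct drm_crtc *crtc, u32 val)
	{
		example_write_latched_reg(crtc, val);	/* assumed helper */
		drm_crtc_wait_one_vblank(crtc);	/* hw applies it at vblank */
	}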
+/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
@@ -1065,6 +1110,9 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
unsigned long irqflags;
unsigned int seq;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
@@ -1134,6 +1182,9 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
@@ -1209,6 +1260,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
+
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
@@ -1532,6 +1587,9 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
if (!dev->num_crtcs)
return false;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return false;
+
spin_lock_irqsave(&dev->event_lock, irqflags);
/* Need timestamp lock to prevent concurrent execution with
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index d34f20a79b7c..3049af5a01b3 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -23,6 +23,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+/*
+ * This file contains legacy interfaces that modern drm drivers
+ * should no longer be using. They cannot be removed as legacy
+ * drivers use them, and removing them would break the API.
+ */
+#include <linux/list.h>
+
+struct agp_memory;
struct drm_device;
struct drm_file;
@@ -48,4 +56,40 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
+/*
+ * Generic Buffer Management
+ */
+
+#define DRM_MAP_HASH_OFFSET 0x10000000
+
+int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+
+/*
+ * AGP Support
+ */
+
+struct drm_agp_mem {
+ unsigned long handle;
+ struct agp_memory *memory;
+ unsigned long bound;
+ int pages;
+ struct list_head head;
+};
+
+/*
+ * Generic Userspace Locking-API
+ */
+
+int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
+int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx);
+
#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index ea1572596578..727b032292b4 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -52,7 +52,8 @@ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
*
 * Add the current task to the lock wait queue, and attempt to take the lock.
*/
-int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_legacy_lock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
DECLARE_WAITQUEUE(entry, current);
struct drm_lock *lock = data;
@@ -112,7 +113,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
/* don't set the block all signals on the master process for now
* really probably not the correct answer but lets us debug xkb
* xserver for now */
- if (!drm_is_master(file_priv)) {
+ if (!file_priv->is_master) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
@@ -120,7 +121,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock->context;
dev->sigdata.lock = master->lock.hw_lock;
- block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+ block_all_signals(drm_notifier, dev, &dev->sigmask);
}
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
@@ -146,7 +147,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Transfer and free the lock.
*/
-int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
@@ -157,7 +158,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
- if (drm_lock_free(&master->lock, lock->context)) {
+ if (drm_legacy_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
@@ -250,7 +251,7 @@ static int drm_lock_transfer(struct drm_lock_data *lock_data,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
-int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -286,26 +287,27 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
* If the lock is not held, then let the signal proceed as usual. If the lock
* is held, then set the contended flag and keep the signal blocked.
*
- * \param priv pointer to a drm_sigdata structure.
+ * \param priv pointer to a drm_device structure.
* \return one if the signal should be delivered normally, or zero if the
* signal should be blocked.
*/
static int drm_notifier(void *priv)
{
- struct drm_sigdata *s = (struct drm_sigdata *) priv;
+ struct drm_device *dev = priv;
+ struct drm_hw_lock *lock = dev->sigdata.lock;
unsigned int old, new, prev;
/* Allow signal delivery if lock isn't held */
- if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
- || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+ if (!lock || !_DRM_LOCK_IS_HELD(lock->lock)
+ || _DRM_LOCKING_CONTEXT(lock->lock) != dev->sigdata.context)
return 1;
/* Otherwise, set flag to force call to
drmUnlock */
do {
- old = s->lock->lock;
+ old = lock->lock;
new = old | _DRM_LOCK_CONT;
- prev = cmpxchg(&s->lock->lock, old, new);
+ prev = cmpxchg(&lock->lock, old, new);
} while (prev != old);
return 0;
}
@@ -323,7 +325,7 @@ static int drm_notifier(void *priv)
* having to worry about starvation.
*/
-void drm_idlelock_take(struct drm_lock_data *lock_data)
+void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
@@ -340,9 +342,9 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_take);
+EXPORT_SYMBOL(drm_legacy_idlelock_take);
-void drm_idlelock_release(struct drm_lock_data *lock_data)
+void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -360,9 +362,10 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_release);
+EXPORT_SYMBOL(drm_legacy_idlelock_release);
-int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+int drm_legacy_i_have_hw_lock(struct drm_device *dev,
+ struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 00c67c0f2381..62fda6aaad90 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -36,8 +36,20 @@
#include <linux/highmem.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_legacy.h"
#if __OS_HAS_AGP
+
+#ifdef HAVE_PAGE_AGP
+# include <asm/agp.h>
+#else
+# ifdef __powerpc__
+# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# else
+# define PAGE_AGP PAGE_KERNEL
+# endif
+#endif
+
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
{
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index e633df2f68d8..6aa6a9e95570 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -201,16 +201,15 @@ EXPORT_SYMBOL(mipi_dsi_detach);
/**
* mipi_dsi_dcs_write - send DCS write command
* @dsi: DSI device
- * @channel: virtual channel
* @data: pointer to the command followed by parameters
* @len: length of @data
*/
-int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
- const void *data, size_t len)
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
@@ -239,19 +238,18 @@ EXPORT_SYMBOL(mipi_dsi_dcs_write);
/**
* mipi_dsi_dcs_read - send DCS read request command
* @dsi: DSI device
- * @channel: virtual channel
* @cmd: DCS read command
* @data: pointer to read buffer
* @len: length of @data
*
* Function returns number of read bytes or error code.
*/
-ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
- u8 cmd, void *data, size_t len)
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
.tx_buf = &cmd,
.tx_len = 1,
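With the virtual channel now taken from the device itself, panel drivers pass one argument less; a hedged sketch using a standard DCS command (assuming <video/mipi_display.h> for the opcode):

	#include <video/mipi_display.h>

	static int example_panel_sleep_out(struct mipi_dsi_device *dsi)
	{
		u8 cmd = MIPI_DCS_EXIT_SLEEP_MODE;	/* standard DCS opcode */
		ssize_t ret;

		ret = mipi_dsi_dcs_write(dsi, &cmd, sizeof(cmd));
		return ret < 0 ? ret : 0;
	}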
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index bedf1894e17e..d1b7d2006529 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1259,6 +1259,7 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
if (!mode)
return NULL;
+ mode->type |= DRM_MODE_TYPE_USERDEF;
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 0dc57d5ecd10..8749fc06570e 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -57,6 +57,212 @@
/**
+ * __drm_modeset_lock_all - internal helper to grab all modeset locks
+ * @dev: DRM device
+ * @trylock: trylock mode for atomic contexts
+ *
+ * This is a special version of drm_modeset_lock_all() which can also be used
+ * in atomic contexts, in which case @trylock must be set to true.
+ *
+ * Returns:
+ * 0 on success or negative error code on failure.
+ */
+int __drm_modeset_lock_all(struct drm_device *dev,
+ bool trylock)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx),
+ trylock ? GFP_ATOMIC : GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+	if (trylock) {
+		if (!mutex_trylock(&config->mutex)) {
+			kfree(ctx);	/* don't leak the ctx on contention */
+			return -EBUSY;
+		}
+ } else {
+ mutex_lock(&config->mutex);
+ }
+
+ drm_modeset_acquire_init(ctx, 0);
+ ctx->trylock_only = trylock;
+
+retry:
+ ret = drm_modeset_lock(&config->connection_mutex, ctx);
+ if (ret)
+ goto fail;
+ ret = drm_modeset_lock_all_crtcs(dev, ctx);
+ if (ret)
+ goto fail;
+
+ WARN_ON(config->acquire_ctx);
+
+	/* now we hold the locks, so it is safe to stash the ctx for
+	 * drm_modeset_unlock_all():
+	 */
+ config->acquire_ctx = ctx;
+
+ drm_warn_on_modeset_not_all_locked(dev);
+
+ return 0;
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ goto retry;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(__drm_modeset_lock_all);
+
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+ WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
+}
+EXPORT_SYMBOL(drm_modeset_lock_all);
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ *
+ * This function drops all modeset locks taken by drm_modeset_lock_all().
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+
+ if (WARN_ON(!ctx))
+ return;
+
+ config->acquire_ctx = NULL;
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_all);
+
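The expected call pattern is unchanged by the move into this file; a minimal sketch (the loop body is purely illustrative):

	/* Hypothetical: inspect all crtcs under the global modeset locks. */
	static void example_walk_crtcs(struct drm_device *dev)
	{
		struct drm_crtc *crtc;

		drm_modeset_lock_all(dev);
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			DRM_DEBUG_KMS("crtc %d enabled: %d\n",
				      crtc->base.id, crtc->enabled);
		drm_modeset_unlock_all(dev);
	}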
+/**
+ * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
+ * @crtc: drm crtc
+ *
+ * This function locks the given crtc using a hidden acquire context. This is
+ * necessary so that drivers internally using the atomic interfaces can grab
+ * further locks with the lock acquire context.
+ */
+void drm_modeset_lock_crtc(struct drm_crtc *crtc)
+{
+ struct drm_modeset_acquire_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (WARN_ON(!ctx))
+ return;
+
+ drm_modeset_acquire_init(ctx, 0);
+
+retry:
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ goto fail;
+
+ WARN_ON(crtc->acquire_ctx);
+
+	/* now we hold the locks, so it is safe to stash the ctx for
+	 * drm_modeset_unlock_crtc():
+	 */
+ crtc->acquire_ctx = ctx;
+
+ return;
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ goto retry;
+ }
+}
+EXPORT_SYMBOL(drm_modeset_lock_crtc);
+
+/**
+ * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
+ * @crtc: drm crtc
+ *
+ * Legacy ioctl operations like cursor updates or page flips only have per-crtc
+ * locking, and store the acquire ctx in the corresponding crtc. All other
+ * legacy operations take all locks and use a global acquire context. This
+ * function grabs the right one.
+ */
+struct drm_modeset_acquire_ctx *
+drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
+{
+ if (crtc->acquire_ctx)
+ return crtc->acquire_ctx;
+
+ WARN_ON(!crtc->dev->mode_config.acquire_ctx);
+
+ return crtc->dev->mode_config.acquire_ctx;
+}
+EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
+
+/**
+ * drm_modeset_unlock_crtc - drop crtc lock
+ * @crtc: drm crtc
+ *
+ * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
+ * locks acquired through the hidden context.
+ */
+void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
+{
+ struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
+
+ if (WARN_ON(!ctx))
+ return;
+
+ crtc->acquire_ctx = NULL;
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+
+ kfree(ctx);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_crtc);
+
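This mirrors what the cursor and page-flip ioctls above were converted to; sketched usage (error and -EDEADLK backoff handling elided, the example_ name is invented):

	/* Hypothetical per-crtc operation using the hidden acquire ctx. */
	static int example_crtc_op(struct drm_crtc *crtc)
	{
		int ret;

		drm_modeset_lock_crtc(crtc);
		/* further locks can chain off the stashed context: */
		ret = drm_modeset_lock(&crtc->dev->mode_config.connection_mutex,
				       drm_modeset_legacy_acquire_ctx(crtc));
		drm_modeset_unlock_crtc(crtc);	/* drops everything in the ctx */
		return ret;
	}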
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ *
+ * Useful as a debug assert.
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ /* Locking is currently fubar in the panic handler. */
+ if (oops_in_progress)
+ return;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
+
+/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
* @flags: for future
@@ -108,7 +314,12 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
WARN_ON(ctx->contended);
- if (interruptible && slow) {
+ if (ctx->trylock_only) {
+ if (!ww_mutex_trylock(&lock->mutex))
+ return -EBUSY;
+ else
+ return 0;
+ } else if (interruptible && slow) {
ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (interruptible) {
ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 020cfd934854..7563130c6b70 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -127,34 +127,20 @@ static int drm_get_pci_domain(struct drm_device *dev)
return pci_domain_nr(dev->pdev->bus);
}
-static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
- int len, ret;
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
+ master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+ if (!master->unique)
return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
-
- if (len >= master->unique_len) {
- DRM_ERROR("buffer overflow");
- ret = -EINVAL;
- goto err;
- } else
- master->unique_len = len;
-
+ master->unique_len = strlen(master->unique);
return 0;
-err:
- return ret;
}
+EXPORT_SYMBOL(drm_pci_set_busid);
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
@@ -163,8 +149,7 @@ int drm_pci_set_unique(struct drm_device *dev,
int domain, bus, slot, func, ret;
master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
@@ -269,10 +254,6 @@ void drm_pci_agp_destroy(struct drm_device *dev)
}
}
-static struct drm_bus drm_pci_bus = {
- .set_busid = drm_pci_set_busid,
-};
-
/**
* drm_get_pci_dev - Register a PCI device with the DRM subsystem
* @pdev: PCI device
@@ -353,8 +334,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
- driver->bus = &drm_pci_bus;
-
if (driver->driver_features & DRIVER_MODESET)
return pci_register_driver(pdriver);
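With struct drm_bus gone from the pci path (and from the platform path just below), drivers are expected to point drm_driver.set_busid at the matching helper themselves; roughly (a sketch, not any particular driver):

	static struct drm_driver example_driver = {
		.driver_features = DRIVER_MODESET | DRIVER_GEM,
		.set_busid = drm_pci_set_busid,	/* or drm_platform_set_busid */
		/* fops, ioctls, etc. elided */
	};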
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index d5b76f148c12..5314c9d5fef4 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,42 +68,23 @@ err_free:
return ret;
}
-static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
{
- int len, ret, id;
-
- master->unique_len = 13 + strlen(dev->platformdev->name);
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
- if (master->unique == NULL)
- return -ENOMEM;
+ int id;
id = dev->platformdev->id;
-
- /* if only a single instance of the platform device, id will be
- * set to -1.. use 0 instead to avoid a funny looking bus-id:
- */
- if (id == -1)
+ if (id < 0)
id = 0;
- len = snprintf(master->unique, master->unique_len,
- "platform:%s:%02d", dev->platformdev->name, id);
-
- if (len > master->unique_len) {
- DRM_ERROR("Unique buffer overflowed\n");
- ret = -EINVAL;
- goto err;
- }
+ master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
+ dev->platformdev->name, id);
+ if (!master->unique)
+ return -ENOMEM;
+ master->unique_len = strlen(master->unique);
return 0;
-err:
- return ret;
}
-
-static struct drm_bus drm_platform_bus = {
- .set_busid = drm_platform_set_busid,
-};
+EXPORT_SYMBOL(drm_platform_set_busid);
/**
* drm_platform_init - Register a platform device with the DRM subsystem
@@ -120,7 +101,6 @@ int drm_platform_init(struct drm_driver *driver, struct platform_device *platfor
{
DRM_DEBUG("\n");
- driver->bus = &drm_platform_bus;
return drm_get_platform_dev(platform_device, driver);
}
EXPORT_SYMBOL(drm_platform_init);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 304ca8cacbc4..99d578bad17e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -336,7 +336,13 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
+ struct reservation_object *robj = NULL;
+
+ if (dev->driver->gem_prime_res_obj)
+ robj = dev->driver->gem_prime_res_obj(obj);
+
+ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+ flags, robj);
}
EXPORT_SYMBOL(drm_gem_prime_export);
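A driver opts in by supplying the new gem_prime_res_obj hook; for a TTM-backed driver the lookup might look like this (the embedding struct and its members are assumptions about driver layout, not taken from this patch):

	/* Hypothetical driver object embedding both TTM and GEM objects. */
	struct example_bo {
		struct ttm_buffer_object tbo;
		struct drm_gem_object gem;
	};

	static struct reservation_object *
	example_gem_prime_res_obj(struct drm_gem_object *obj)
	{
		struct example_bo *bo = container_of(obj, struct example_bo, gem);

		return bo->tbo.resv;
	}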
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index db7d250f7ac7..6857e9ad6339 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,6 +82,22 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
return;
}
+static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ if (!connector->cmdline_mode.specified)
+ return 0;
+
+ mode = drm_mode_create_from_cmdline_mode(connector->dev,
+ &connector->cmdline_mode);
+ if (mode == NULL)
+ return 0;
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
@@ -141,6 +157,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
+ count += drm_helper_probe_add_cmdline_mode(connector);
if (count == 0)
goto prune;
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
deleted file mode 100644
index f2fe94aab901..000000000000
--- a/drivers/gpu/drm/drm_usb.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include <drm/drmP.h>
-#include <drm/drm_usb.h>
-#include <linux/usb.h>
-#include <linux/module.h>
-
-int drm_get_usb_dev(struct usb_interface *interface,
- const struct usb_device_id *id,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = drm_dev_alloc(driver, &interface->dev);
- if (!dev)
- return -ENOMEM;
-
- dev->usbdev = interface_to_usbdev(interface);
- usb_set_intfdata(interface, dev);
-
- ret = drm_dev_register(dev, 0);
- if (ret)
- goto err_free;
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, dev->primary->index);
-
- return 0;
-
-err_free:
- drm_dev_unref(dev);
- return ret;
-
-}
-EXPORT_SYMBOL(drm_get_usb_dev);
-
-static int drm_usb_set_busid(struct drm_device *dev,
- struct drm_master *master)
-{
- return 0;
-}
-
-static struct drm_bus drm_usb_bus = {
- .set_busid = drm_usb_set_busid,
-};
-
-/**
- * drm_usb_init - Register matching USB devices with the DRM subsystem
- * @driver: DRM device driver
- * @udriver: USB device driver
- *
- * Registers one or more devices matched by a USB driver with the DRM
- * subsystem.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
-{
- int res;
- DRM_DEBUG("\n");
-
- driver->bus = &drm_usb_bus;
-
- res = usb_register(udriver);
- return res;
-}
-EXPORT_SYMBOL(drm_usb_init);
-
-/**
- * drm_usb_exit - Unregister matching USB devices from the DRM subsystem
- * @driver: DRM device driver
- * @udriver: USB device driver
- *
- * Unregisters one or more devices matched by a USB driver from the DRM
- * subsystem.
- */
-void drm_usb_exit(struct drm_driver *driver,
- struct usb_driver *udriver)
-{
- usb_deregister(udriver);
-}
-EXPORT_SYMBOL(drm_usb_exit);
-
-MODULE_AUTHOR("David Airlie");
-MODULE_DESCRIPTION("USB DRM support");
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 24e045c4f531..4b3e9c4754d1 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -35,10 +35,19 @@
#include <drm/drmP.h>
#include <linux/export.h>
+#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
+#include <asm/pgtable.h>
+#include "drm_legacy.h"
+
+struct drm_vma_entry {
+ struct list_head head;
+ struct vm_area_struct *vma;
+ pid_t pid;
+};
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
@@ -662,3 +671,68 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
EXPORT_SYMBOL(drm_mmap);
+
+void drm_legacy_vma_flush(struct drm_device *dev)
+{
+ struct drm_vma_entry *vma, *vma_temp;
+
+ /* Clear vma list (only needed for legacy drivers) */
+ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+ list_del(&vma->head);
+ kfree(vma);
+ }
+}
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_vma_entry *pt;
+ struct vm_area_struct *vma;
+ unsigned long vma_count = 0;
+#if defined(__i386__)
+ unsigned int pgprot;
+#endif
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(pt, &dev->vmalist, head)
+ vma_count++;
+
+ seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+ vma_count, high_memory,
+ (void *)(unsigned long)virt_to_phys(high_memory));
+
+ list_for_each_entry(pt, &dev->vmalist, head) {
+ vma = pt->vma;
+ if (!vma)
+ continue;
+ seq_printf(m,
+ "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+ pt->pid,
+ (void *)vma->vm_start, (void *)vma->vm_end,
+ vma->vm_flags & VM_READ ? 'r' : '-',
+ vma->vm_flags & VM_WRITE ? 'w' : '-',
+ vma->vm_flags & VM_EXEC ? 'x' : '-',
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
+ vma->vm_pgoff);
+
+#if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
+ seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+ pgprot & _PAGE_PRESENT ? 'p' : '-',
+ pgprot & _PAGE_RW ? 'w' : 'r',
+ pgprot & _PAGE_USER ? 'u' : 's',
+ pgprot & _PAGE_PWT ? 't' : 'b',
+ pgprot & _PAGE_PCD ? 'u' : 'c',
+ pgprot & _PAGE_ACCESSED ? 'a' : '-',
+ pgprot & _PAGE_DIRTY ? 'd' : '-',
+ pgprot & _PAGE_PSE ? 'm' : 'k',
+ pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+ seq_printf(m, "\n");
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 9ba1aaeb8070..7f9f6f9e9b7e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -53,6 +53,7 @@ config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
+ select DRM_PANEL
help
This enables support for the DP device.
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 31c3de98b885..4f3c7eb2d37d 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
@@ -28,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <drm/bridge/ptn3460.h>
#include "exynos_drm_drv.h"
@@ -41,7 +41,7 @@ struct bridge_init {
struct device_node *node;
};
-static int exynos_dp_init_dp(struct exynos_dp_device *dp)
+static void exynos_dp_init_dp(struct exynos_dp_device *dp)
{
exynos_dp_reset(dp);
@@ -58,8 +58,6 @@ static int exynos_dp_init_dp(struct exynos_dp_device *dp)
exynos_dp_init_hpd(dp);
exynos_dp_init_aux(dp);
-
- return 0;
}
static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
@@ -875,10 +873,24 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
static void exynos_dp_hotplug(struct work_struct *work)
{
struct exynos_dp_device *dp;
- int ret;
dp = container_of(work, struct exynos_dp_device, hotplug_work);
+ if (dp->drm_dev)
+ drm_helper_hpd_irq_event(dp->drm_dev);
+}
+
+static void exynos_dp_commit(struct exynos_drm_display *display)
+{
+ struct exynos_dp_device *dp = display->ctx;
+ int ret;
+
+ /* Keep the panel disabled while we configure video */
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel))
+ DRM_ERROR("failed to disable the panel\n");
+ }
+
ret = exynos_dp_detect_hpd(dp);
if (ret) {
/* Cable has been disconnected, we're done */
@@ -909,6 +921,12 @@ static void exynos_dp_hotplug(struct work_struct *work)
ret = exynos_dp_config_video(dp);
if (ret)
dev_err(dp->dev, "unable to config video\n");
+
+ /* Safe to enable the panel now */
+ if (dp->panel) {
+ if (drm_panel_enable(dp->panel))
+ DRM_ERROR("failed to enable the panel\n");
+ }
}
static enum drm_connector_status exynos_dp_detect(
@@ -933,15 +951,18 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
struct exynos_dp_device *dp = ctx_from_connector(connector);
struct drm_display_mode *mode;
+ if (dp->panel)
+ return drm_panel_get_modes(dp->panel);
+
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_ERROR("failed to create a new display mode.\n");
return 0;
}
- drm_display_mode_from_videomode(&dp->panel.vm, mode);
- mode->width_mm = dp->panel.width_mm;
- mode->height_mm = dp->panel.height_mm;
+ drm_display_mode_from_videomode(&dp->priv.vm, mode);
+ mode->width_mm = dp->priv.width_mm;
+ mode->height_mm = dp->priv.height_mm;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
@@ -1021,7 +1042,10 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
+ if (dp->panel)
+ ret = drm_panel_attach(dp->panel, &dp->connector);
+
+ return ret;
}
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
@@ -1050,26 +1074,50 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
}
}
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_poweron(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_prepare(dp->panel)) {
+ DRM_ERROR("failed to setup the panel\n");
+ return;
+ }
+ }
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
+ exynos_dp_commit(display);
}
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_poweroff(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel)) {
+ DRM_ERROR("failed to disable the panel\n");
+ return;
+ }
+ }
+
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
+
+ if (dp->panel) {
+ if (drm_panel_unprepare(dp->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+ }
}
static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
@@ -1078,12 +1126,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- exynos_dp_poweron(dp);
+ exynos_dp_poweron(display);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- exynos_dp_poweroff(dp);
+ exynos_dp_poweroff(display);
break;
default:
break;
@@ -1094,6 +1142,7 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
static struct exynos_drm_display_ops exynos_dp_display_ops = {
.create_connector = exynos_dp_create_connector,
.dpms = exynos_dp_dpms,
+ .commit = exynos_dp_commit,
};
static struct exynos_drm_display exynos_dp_display = {
@@ -1201,7 +1250,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
- ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm,
+ ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm,
OF_USE_NATIVE_MODE);
if (ret) {
DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
@@ -1215,16 +1264,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct resource *res;
- struct exynos_dp_device *dp;
+ struct exynos_dp_device *dp = exynos_dp_display.ctx;
unsigned int irq_flags;
-
int ret = 0;
- dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
- GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
-
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1236,9 +1279,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = exynos_dp_dt_parse_panel(dp);
- if (ret)
- return ret;
+ if (!dp->panel) {
+ ret = exynos_dp_dt_parse_panel(dp);
+ if (ret)
+ return ret;
+ }
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
@@ -1298,7 +1343,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
disable_irq(dp->irq);
dp->drm_dev = drm_dev;
- exynos_dp_display.ctx = dp;
platform_set_drvdata(pdev, &exynos_dp_display);
@@ -1325,6 +1369,9 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *panel_node;
+ struct exynos_dp_device *dp;
int ret;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
@@ -1332,6 +1379,21 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ panel_node = of_parse_phandle(dev->of_node, "panel", 0);
+ if (panel_node) {
+ dp->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!dp->panel)
+ return -EPROBE_DEFER;
+ }
+
+ exynos_dp_display.ctx = dp;
+
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 02cc4f9ab903..a1aee6931bd7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -149,6 +149,7 @@ struct exynos_dp_device {
struct drm_device *drm_dev;
struct drm_connector connector;
struct drm_encoder *encoder;
+ struct drm_panel *panel;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -162,7 +163,7 @@ struct exynos_dp_device {
int dpms_mode;
int hpd_gpio;
- struct exynos_drm_panel_info panel;
+ struct exynos_drm_panel_info priv;
};
/* exynos_dp_reg.c */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 2a3ad24276f8..60192ed544f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -187,7 +187,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
return dma_buf_export(obj, &exynos_dmabuf_ops,
- exynos_gem_obj->base.size, flags);
+ exynos_gem_obj->base.size, flags, NULL);
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3aa1c7ebbfcc..fa08f05e3e34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -125,14 +125,18 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
static void exynos_dpi_poweron(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
+ drm_panel_prepare(ctx->panel);
drm_panel_enable(ctx->panel);
+ }
}
static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
drm_panel_disable(ctx->panel);
+ drm_panel_unprepare(ctx->panel);
+ }
}
static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
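The exynos DP, DPI and DSI hunks in this merge all follow the same drm_panel
contract: prepare (power rails) before the pipe feeds video, enable (backlight)
once the stream is up, and the mirror image on the way down. A sketch of the
assumed call order:

	/* Sketch: drm_panel power sequence these hunks rely on. */
	drm_panel_prepare(panel);	/* rails up, panel can accept video */
	/* ... start the display pipe / video stream ... */
	drm_panel_enable(panel);	/* backlight on, image visible */

	drm_panel_disable(panel);	/* backlight off */
	/* ... stop the display pipe ... */
	drm_panel_unprepare(panel);	/* rails down */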
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 0d74e9b99c4e..5aae95cf5b23 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -330,6 +330,7 @@ static struct drm_driver exynos_drm_driver = {
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
.postclose = exynos_drm_postclose,
+ .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 86aebd83a71b..442aa2d00132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1333,7 +1333,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- ret = drm_panel_enable(dsi->panel);
+ ret = drm_panel_prepare(dsi->panel);
if (ret < 0) {
exynos_dsi_poweroff(dsi);
return ret;
@@ -1342,6 +1342,14 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
+ exynos_dsi_poweroff(dsi);
+ return ret;
+ }
+
dsi->state |= DSIM_STATE_ENABLED;
return 0;
@@ -1352,8 +1360,9 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
- exynos_dsi_set_display_enable(dsi, false);
drm_panel_disable(dsi->panel);
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
dsi->state &= ~DSIM_STATE_ENABLED;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index a97e38e284fa..d75ecb3bdee7 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -39,7 +39,6 @@ static void mid_get_fuse_settings(struct drm_device *dev)
#define FB_REG06 0xD0810600
#define FB_MIPI_DISABLE (1 << 11)
#define FB_REG09 0xD0810900
-#define FB_REG09 0xD0810900
#define FB_SKU_MASK 0x7000
#define FB_SKU_SHIFT 12
#define FB_SKU_100 0
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index e6f5c620a0a2..54f73f50571a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -674,7 +674,7 @@ failed_connector:
kfree(gma_encoder);
}
-static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+static const struct pci_device_id hdmi_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
{ 0 }
};
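DEFINE_PCI_DEVICE_TABLE() is deprecated in favor of the open-coded
"static const struct pci_device_id" form used here. A hot-pluggable driver
would normally also export the table for module autoloading; a hedged sketch
(example_ids is illustrative):

	static const struct pci_device_id example_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
		{ }	/* zero terminator */
	};
	MODULE_DEVICE_TABLE(pci, example_ids);	/* modalias-based autoload */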
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6e8fe9ec02b5..6ec3a905fdd2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -54,7 +54,7 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
* PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
* N2800
*/
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+static const struct pci_device_id pciidlist[] = {
{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
#if defined(CONFIG_DRM_GMA600)
@@ -476,6 +476,7 @@ static struct drm_driver driver = {
.unload = psb_driver_unload,
.lastclose = psb_driver_lastclose,
.preclose = psb_driver_preclose,
+ .set_busid = drm_pci_set_busid,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.device_is_agp = psb_driver_device_is_agp,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e88bac1d781f..c97e2ff6a35a 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -393,15 +393,14 @@ static int i810_dma_initialize(struct drm_device *dev,
/* Program Hardware Status Page */
dev_priv->hw_status_page =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE,
- &dev_priv->dma_status_page);
+ pci_zalloc_consistent(dev->pdev, PAGE_SIZE,
+ &dev_priv->dma_status_page);
if (!dev_priv->hw_status_page) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
I810_WRITE(0x02080, dev_priv->dma_status_page);
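pci_zalloc_consistent() is simply the zero-initializing flavor of
pci_alloc_consistent(), which is why the explicit memset() that used to follow
the allocation is dropped. Roughly equivalent, as a sketch:

	/* Sketch: what the zalloc variant amounts to. */
	void *buf = pci_alloc_consistent(pdev, size, &dma_handle);
	if (buf)
		memset(buf, 0, size);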
@@ -1216,9 +1215,9 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
}
if (file_priv->master && file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
+ drm_legacy_idlelock_take(&file_priv->master->lock);
i810_driver_reclaim_buffers(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
+ drm_legacy_idlelock_release(&file_priv->master->lock);
} else {
/* master disappeared, clean up stuff anyway and hope nothing
* goes wrong */
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 441ccf8f5bdc..6cb08a1c6b62 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -63,6 +63,7 @@ static struct drm_driver driver = {
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
+ .set_busid = drm_pci_set_busid,
.device_is_agp = i810_driver_device_is_agp,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
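The .set_busid additions scattered through this merge (i810, gma500, i915,
exynos) reflect the DRM core no longer deriving the bus id through per-bus
midlayer hooks; each driver now names the helper for its bus type itself.
Sketched:

	/* Sketch: opting in to the generic busid helper. */
	static struct drm_driver example_driver = {
		.set_busid = drm_pci_set_busid,	/* drm_platform_set_busid
						 * for platform devices */
	};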
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91bd167e1cb7..c1dd485aeb6c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
+ intel_lrc.o \
intel_ringbuffer.o \
intel_uncore.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index dea99d92fb4a..c45856bcc8b9 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -842,8 +842,6 @@ finish:
*/
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
if (!ring->needs_cmd_parser)
return false;
@@ -852,7 +850,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
* disabled. That will cause all of the parser's PPGTT checks to
* fail. For now, disable parsing when PPGTT is off.
*/
- if (!dev_priv->mm.aliasing_ppgtt)
+ if (USES_PPGTT(ring->dev))
return false;
return (i915.enable_cmd_parser == 1);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9e737b771c40..6c82bdaa0822 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -333,7 +333,7 @@ static int per_file_stats(int id, void *ptr, void *data)
}
ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
- if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
+ if (ppgtt->file_priv != stats->file_priv)
continue;
if (obj->ring) /* XXX per-vma statistic */
@@ -703,6 +703,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(pipe) {
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe))) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
seq_printf(m, "Pipe %c IMR:\t%08x\n",
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IMR(pipe)));
@@ -1433,6 +1439,47 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_fbc_fc_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+ *val = dev_priv->fbc.false_color;
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int i915_fbc_fc_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+
+ if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+
+ reg = I915_READ(ILK_DPFC_CONTROL);
+ dev_priv->fbc.false_color = val;
+
+ I915_WRITE(ILK_DPFC_CONTROL, val ?
+ (reg | FBC_CTL_FALSE_COLOR) :
+ (reg & ~FBC_CTL_FALSE_COLOR));
+
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
+ i915_fbc_fc_get, i915_fbc_fc_set,
+ "%llu\n");
+
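DEFINE_SIMPLE_ATTRIBUTE() expands to a file_operations (i915_fbc_fc_fops) whose
read formats the value returned by the get callback with "%llu\n" and whose
write parses user input into the u64 passed to the set callback. The
i915_debugfs_files[] hunk further down registers it; the underlying call is
roughly the following, where root stands for the i915 debugfs directory and
dev comes back as the callbacks' data argument:

	/* Sketch: wiring a simple attribute into debugfs. */
	debugfs_create_file("i915_fbc_false_color", S_IRUSR | S_IWUSR,
			    root, dev, &i915_fbc_fc_fops);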
static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1630,6 +1677,14 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
+static void describe_ctx_ringbuf(struct seq_file *m,
+ struct intel_ringbuffer *ringbuf)
+{
+ seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
+ ringbuf->space, ringbuf->head, ringbuf->tail,
+ ringbuf->last_retired_head);
+}
+
static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1656,16 +1711,168 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (ctx->legacy_hw_ctx.rcs_state == NULL)
+ if (!i915.enable_execlists &&
+ ctx->legacy_hw_ctx.rcs_state == NULL)
continue;
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(ring, dev_priv, i) {
if (ring->default_context == ctx)
- seq_printf(m, "(default context %s) ", ring->name);
+ seq_printf(m, "(default context %s) ",
+ ring->name);
+ }
+
+ if (i915.enable_execlists) {
+ seq_putc(m, '\n');
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[i].state;
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[i].ringbuf;
+
+ seq_printf(m, "%s: ", ring->name);
+ if (ctx_obj)
+ describe_obj(m, ctx_obj);
+ if (ringbuf)
+ describe_ctx_ringbuf(m, ringbuf);
+ seq_putc(m, '\n');
+ }
+ } else {
+ describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+ }
+
+ seq_putc(m, '\n');
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_dump_lrc(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
+ int ret, i;
+
+ if (!i915.enable_execlists) {
+ seq_printf(m, "Logical Ring Contexts are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+
+ if (ring->default_context == ctx)
+ continue;
+
+ if (ctx_obj) {
+ struct page *page = i915_gem_object_get_page(ctx_obj, 1);
+ uint32_t *reg_state = kmap_atomic(page);
+ int j;
+
+ seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+ intel_execlists_ctx_id(ctx_obj));
+
+ for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+ seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
+ reg_state[j], reg_state[j + 1],
+ reg_state[j + 2], reg_state[j + 3]);
+ }
+ kunmap_atomic(reg_state);
+
+ seq_putc(m, '\n');
+ }
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_execlists(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 status_pointer;
+ u8 read_pointer;
+ u8 write_pointer;
+ u32 status;
+ u32 ctx_id;
+ struct list_head *cursor;
+ int ring_id, i;
+ int ret;
+
+ if (!i915.enable_execlists) {
+ seq_puts(m, "Logical Ring Contexts are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ for_each_ring(ring, dev_priv, ring_id) {
+ struct intel_ctx_submit_request *head_req = NULL;
+ int count = 0;
+ unsigned long flags;
+
+ seq_printf(m, "%s\n", ring->name);
+
+ status = I915_READ(RING_EXECLIST_STATUS(ring));
+ ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
+ seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
+ status, ctx_id);
+
+ status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+ seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
+
+ read_pointer = ring->next_context_status_buffer;
+ write_pointer = status_pointer & 0x07;
+ if (read_pointer > write_pointer)
+ write_pointer += 6;
+ seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
+ read_pointer, write_pointer);
+
+ for (i = 0; i < 6; i++) {
+ status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
+ ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
+
+ seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
+ i, status, ctx_id);
+ }
+
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ list_for_each(cursor, &ring->execlist_queue)
+ count++;
+ head_req = list_first_entry_or_null(&ring->execlist_queue,
+ struct intel_ctx_submit_request, execlist_link);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ seq_printf(m, "\t%d requests in queue\n", count);
+ if (head_req) {
+ struct drm_i915_gem_object *ctx_obj;
+
+ ctx_obj = head_req->ctx->engine[ring_id].state;
+ seq_printf(m, "\tHead request id: %u\n",
+ intel_execlists_ctx_id(ctx_obj));
+ seq_printf(m, "\tHead request tail: %u\n",
+ head_req->tail);
+ }
- describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
seq_putc(m, '\n');
}
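The read/write pointers above index a six-entry context status buffer, so a
raw write pointer that has wrapped behind the read pointer is bumped by 6
before printing, keeping the pending-entry count a plain subtraction:

	/* Sketch: read = 5, raw write = 1 (wrapped)
	 *   write += 6  ->  write = 7
	 *   pending     =   write - read = 2 entries
	 */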
@@ -1774,7 +1981,13 @@ static int per_file_ctx(int id, void *ptr, void *data)
{
struct intel_context *ctx = ptr;
struct seq_file *m = data;
- struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+
+ if (!ppgtt) {
+ seq_printf(m, " no ppgtt for context %d\n",
+ ctx->user_handle);
+ return 0;
+ }
if (i915_gem_context_is_default(ctx))
seq_puts(m, " default context:\n");
@@ -1834,8 +2047,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
ppgtt->debug_dump(ppgtt, m);
- } else
- return;
+ }
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2667,8 +2879,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
drm_modeset_lock_all(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (!encoder->base.crtc)
continue;
@@ -3923,6 +4134,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_opregion", i915_opregion, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
+ {"i915_dump_lrc", i915_dump_lrc, 0},
+ {"i915_execlists", i915_execlists, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -3957,6 +4170,7 @@ static const struct i915_debugfs_files {
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+ {"i915_fbc_false_color", &i915_fbc_fc_fops},
};
void intel_display_crc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..689c3326636f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -196,7 +196,7 @@ static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret;
- master_priv->sarea = drm_getsarea(dev);
+ master_priv->sarea = drm_legacy_getsarea(dev);
if (master_priv->sarea) {
master_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
@@ -999,7 +999,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = HAS_WT(dev);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
+ value = USES_PPGTT(dev);
break;
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
@@ -1350,8 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
- INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
-
intel_modeset_gem_init(dev);
/* Always safe in the mode setting case. */
@@ -1388,7 +1386,6 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
- WARN_ON(dev_priv->mm.aliasing_ppgtt);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1603,9 +1600,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = dev_priv;
dev_priv->dev = dev;
- /* copy initial configuration to dev_priv->info */
+ /* Setup the write-once "constant" device info */
device_info = (struct intel_device_info *)&dev_priv->info;
- *device_info = *info;
+ memcpy(device_info, info, sizeof(dev_priv->info));
+ device_info->device_id = dev->pdev->device;
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1817,7 +1815,7 @@ out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
- dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+ i915_global_gtt_cleanup(dev);
out_regs:
intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs);
@@ -1864,7 +1862,6 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
- cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device
@@ -1897,7 +1894,6 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
- WARN_ON(dev_priv->mm.aliasing_ppgtt);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
@@ -1905,8 +1901,6 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- WARN_ON(!list_empty(&dev_priv->vm_list));
-
drm_vblank_cleanup(dev);
intel_teardown_gmbus(dev);
@@ -1916,7 +1910,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
- dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+ i915_global_gtt_cleanup(dev);
intel_uncore_fini(dev);
if (dev_priv->regs != NULL)
@@ -1981,6 +1975,9 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
i915_gem_context_close(dev, file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_modeset_preclose(dev, file);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6c4b25ce8bb0..cdd95956811d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -481,6 +481,14 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.semaphores >= 0)
return i915.semaphores;
+ /* TODO: make semaphores and Execlists play nicely together */
+ if (i915.enable_execlists)
+ return false;
+
+ /* Until we get further testing... */
+ if (IS_GEN8(dev))
+ return false;
+
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -490,6 +498,40 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return true;
}
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ dev_priv->long_hpd_port_mask = 0;
+ dev_priv->short_hpd_port_mask = 0;
+ dev_priv->hpd_event_bits = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ cancel_work_sync(&dev_priv->dig_port_work);
+ cancel_work_sync(&dev_priv->hotplug_work);
+ cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_encoder *encoder;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (intel_encoder->suspend)
+ intel_encoder->suspend(intel_encoder);
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static int intel_suspend_complete(struct drm_i915_private *dev_priv);
+static int intel_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume);
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -534,6 +576,9 @@ static int i915_drm_freeze(struct drm_device *dev)
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_disable_interrupts(dev);
+ intel_hpd_cancel_work(dev_priv);
+
+ intel_suspend_encoders(dev_priv);
intel_suspend_gt_powersave(dev);
@@ -554,9 +599,7 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
- console_lock();
- intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
- console_unlock();
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
dev_priv->suspend_count++;
@@ -595,30 +638,20 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
-void intel_console_resume(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- console_resume_work);
- struct drm_device *dev = dev_priv->dev;
-
- console_lock();
- intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
- console_unlock();
-}
-
static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_disable_pc8(dev_priv);
+ ret = intel_resume_prepare(dev_priv, false);
+ if (ret)
+ DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
- return 0;
+ return ret;
}
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
@@ -677,17 +710,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_opregion_init(dev);
- /*
- * The console lock can be pretty contented on resume due
- * to all the printk activity. Try to keep it out of the hot
- * path of resume if possible.
- */
- if (console_trylock()) {
- intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
- console_unlock();
- } else {
- schedule_work(&dev_priv->console_resume_work);
- }
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
@@ -904,6 +927,7 @@ static int i915_pm_suspend_late(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ int ret;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -917,13 +941,16 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
- hsw_enable_pc8(dev_priv);
+ ret = intel_suspend_complete(dev_priv);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
+ if (ret)
+ DRM_ERROR("Suspend complete failed: %d\n", ret);
+ else {
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
- return 0;
+ return ret;
}
static int i915_pm_resume_early(struct device *dev)
@@ -979,23 +1006,26 @@ static int i915_pm_poweroff(struct device *dev)
return i915_drm_freeze(drm_dev);
}
-static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
return 0;
}
-static int snb_runtime_resume(struct drm_i915_private *dev_priv)
+static int snb_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume)
{
struct drm_device *dev = dev_priv->dev;
- intel_init_pch_refclk(dev);
+ if (rpm_resume)
+ intel_init_pch_refclk(dev);
return 0;
}
-static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
+static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume)
{
hsw_disable_pc8(dev_priv);
@@ -1291,7 +1321,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
-static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
u32 mask;
int err;
@@ -1331,7 +1361,8 @@ err1:
return err;
}
-static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume)
{
struct drm_device *dev = dev_priv->dev;
int err;
@@ -1356,8 +1387,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
vlv_check_no_gt_access(dev_priv);
- intel_init_clock_gating(dev);
- i915_gem_restore_fences(dev);
+ if (rpm_resume) {
+ intel_init_clock_gating(dev);
+ i915_gem_restore_fences(dev);
+ }
return ret;
}
@@ -1372,7 +1405,9 @@ static int intel_runtime_suspend(struct device *device)
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
return -ENODEV;
- WARN_ON(!HAS_RUNTIME_PM(dev));
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ return -ENODEV;
+
assert_force_wake_inactive(dev_priv);
DRM_DEBUG_KMS("Suspending device\n");
@@ -1409,17 +1444,7 @@ static int intel_runtime_suspend(struct device *device)
cancel_work_sync(&dev_priv->rps.work);
intel_runtime_pm_disable_interrupts(dev);
- if (IS_GEN6(dev)) {
- ret = 0;
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- ret = hsw_runtime_suspend(dev_priv);
- } else if (IS_VALLEYVIEW(dev)) {
- ret = vlv_runtime_suspend(dev_priv);
- } else {
- ret = -ENODEV;
- WARN_ON(1);
- }
-
+ ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_restore_interrupts(dev);
@@ -1450,24 +1475,15 @@ static int intel_runtime_resume(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- WARN_ON(!HAS_RUNTIME_PM(dev));
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
- if (IS_GEN6(dev)) {
- ret = snb_runtime_resume(dev_priv);
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- ret = hsw_runtime_resume(dev_priv);
- } else if (IS_VALLEYVIEW(dev)) {
- ret = vlv_runtime_resume(dev_priv);
- } else {
- WARN_ON(1);
- ret = -ENODEV;
- }
-
+ ret = intel_resume_prepare(dev_priv, true);
/*
* No point in rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
@@ -1486,6 +1502,48 @@ static int intel_runtime_resume(struct device *device)
return ret;
}
+/*
+ * This function implements the functionality common to the runtime and
+ * system suspend sequences.
+ */
+static int intel_suspend_complete(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int ret;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ ret = hsw_suspend_complete(dev_priv);
+ else if (IS_VALLEYVIEW(dev))
+ ret = vlv_suspend_complete(dev_priv);
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/*
+ * This function implements the functionality common to the runtime and
+ * system resume sequences. The rpm_resume flag selects between the two
+ * code paths.
+ */
+static int intel_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int ret;
+
+ if (IS_GEN6(dev))
+ ret = snb_resume_prepare(dev_priv, rpm_resume);
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ ret = hsw_resume_prepare(dev_priv, rpm_resume);
+ else if (IS_VALLEYVIEW(dev))
+ ret = vlv_resume_prepare(dev_priv, rpm_resume);
+ else
+ ret = 0;
+
+ return ret;
+}
+
static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
@@ -1535,6 +1593,7 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
+ .set_busid = drm_pci_set_busid,
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
.suspend = i915_suspend,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5de27f9b8c26..bcf8783dbc2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,6 +35,7 @@
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
+#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
@@ -53,7 +54,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20140620"
+#define DRIVER_DATE "20140822"
enum pipe {
INVALID_PIPE = -1,
@@ -171,6 +172,11 @@ enum hpd_pin {
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+#define for_each_intel_encoder(dev, intel_encoder) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head)
+
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -197,10 +203,13 @@ enum intel_dpll_id {
#define I915_NUM_PLLS 2
struct intel_dpll_hw_state {
+ /* i9xx, pch plls */
uint32_t dpll;
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
+
+ /* hsw, bdw */
uint32_t wrpll;
};
@@ -314,6 +323,7 @@ struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
+ u32 gtier[4];
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -386,6 +396,7 @@ struct drm_i915_error_state {
pid_t pid;
char comm[TASK_COMM_LEN];
} ring[I915_NUM_RINGS];
+
struct drm_i915_error_buffer {
u32 size;
u32 name;
@@ -404,6 +415,7 @@ struct drm_i915_error_state {
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
+ u32 vm_count;
};
struct intel_connector;
@@ -549,6 +561,7 @@ struct intel_uncore {
struct intel_device_info {
u32 display_mmio_offset;
+ u16 device_id;
u8 num_pipes:3;
u8 num_sprites[I915_MAX_PIPES];
u8 gen;
@@ -613,13 +626,20 @@ struct intel_context {
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
- struct i915_address_space *vm;
+ struct i915_hw_ppgtt *ppgtt;
+ /* Legacy ring buffer submission */
struct {
struct drm_i915_gem_object *rcs_state;
bool initialized;
} legacy_hw_ctx;
+ /* Execlists */
+ struct {
+ struct drm_i915_gem_object *state;
+ struct intel_ringbuffer *ringbuf;
+ } engine[I915_NUM_RINGS];
+
struct list_head link;
};
@@ -633,6 +653,8 @@ struct i915_fbc {
struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
+ bool false_color;
+
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -972,7 +994,7 @@ struct intel_ilk_power_mgmt {
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
- struct timespec last_time2;
+ u64 last_time2;
unsigned long gfx_power;
u8 corr;
@@ -1226,6 +1248,12 @@ enum modeset_restore {
};
struct ddi_vbt_port_info {
+ /*
+ * This is an index in the HDMI/DVI DDI buffer translation table.
+ * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
+ * populate this field.
+ */
+#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
uint8_t hdmi_level_shift;
uint8_t supports_dvi:1;
@@ -1457,7 +1485,7 @@ struct drm_i915_private {
} hpd_mark;
} hpd_stats[HPD_NUM_PINS];
u32 hpd_event_bits;
- struct timer_list hotplug_reenable_timer;
+ struct delayed_work hotplug_reenable_work;
struct i915_fbc fbc;
struct i915_drrs drrs;
@@ -1560,14 +1588,9 @@ struct drm_i915_private {
#ifdef CONFIG_DRM_I915_FBDEV
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ struct work_struct fbdev_suspend_work;
#endif
- /*
- * The console may be contended at resume, but we don't
- * want it to block on it.
- */
- struct work_struct console_resume_work;
-
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
@@ -1619,6 +1642,20 @@ struct drm_i915_private {
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
+ /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
+ struct {
+ int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags);
+ int (*init_rings)(struct drm_device *dev);
+ void (*cleanup_ring)(struct intel_engine_cs *ring);
+ void (*stop_ring)(struct intel_engine_cs *ring);
+ } gt;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1760,13 +1797,6 @@ struct drm_i915_gem_object {
* Only honoured if hardware has relevant pte bit
*/
unsigned long gt_ro:1;
-
- /*
- * Is the GPU currently using a fence to access this buffer,
- */
- unsigned int pending_fenced_gpu_access:1;
- unsigned int fenced_gpu_access:1;
-
unsigned int cache_level:3;
unsigned int has_aliasing_ppgtt_mapping:1;
@@ -1970,51 +2000,63 @@ struct drm_i915_cmd_table {
int count;
};
-#define INTEL_INFO(dev) (&to_i915(dev)->info)
-
-#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
-#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
+/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
+#define __I915__(p) ({ \
+ struct drm_i915_private *__p; \
+ if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
+ __p = (struct drm_i915_private *)p; \
+ else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
+ __p = to_i915((struct drm_device *)p); \
+ else \
+ BUILD_BUG(); \
+ __p; \
+})
+#define INTEL_INFO(p) (&__I915__(p)->info)
+#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+
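__builtin_types_compatible_p() is evaluated at compile time, so __I915__()
statically dispatches on the pointer type: a struct drm_device * is routed
through to_i915(), a struct drm_i915_private * passes through unchanged, and
any other type fails the build via BUILD_BUG(). A sketch of what callers gain
(example_is_gen8 is illustrative):

	/* Sketch: either pointer flavour reaches the same device info. */
	static bool example_is_gen8(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);

		return INTEL_INFO(dev)->gen == 8 &&	/* drm_device */
		       INTEL_INFO(dev_priv)->gen == 8;	/* dev_priv */
	}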
+#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
+#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
+#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
-#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
+#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
+#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
+#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
+#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
+#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
+#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
- (dev)->pdev->device == 0x0152 || \
- (dev)->pdev->device == 0x015a)
-#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
- (dev)->pdev->device == 0x0106 || \
- (dev)->pdev->device == 0x010A)
+#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
+ INTEL_DEVID(dev) == 0x0152 || \
+ INTEL_DEVID(dev) == 0x015a)
+#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
+ INTEL_DEVID(dev) == 0x0106 || \
+ INTEL_DEVID(dev) == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
- ((dev)->pdev->device & 0xFF00) == 0x0C00)
+ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- (((dev)->pdev->device & 0xf) == 0x2 || \
- ((dev)->pdev->device & 0xf) == 0x6 || \
- ((dev)->pdev->device & 0xf) == 0xe))
+ ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
+ (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ (INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
- ((dev)->pdev->device & 0xFF00) == 0x0A00)
+ (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
- ((dev)->pdev->device & 0x00F0) == 0x0020)
+ (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
- (dev)->pdev->device == 0x0A1E)
+#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
+ INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
@@ -2046,10 +2088,11 @@ struct drm_i915_cmd_table {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
-#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
-#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
+#define USES_PPGTT(dev) (i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -2133,6 +2176,7 @@ struct i915_params {
int enable_rc6;
int enable_fbc;
int enable_ppgtt;
+ int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
@@ -2177,8 +2221,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-
-extern void intel_console_resume(struct work_struct *work);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
@@ -2227,6 +2270,20 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+ struct intel_engine_cs *ring);
+void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *obj);
+int i915_gem_ringbuffer_submission(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2379,6 +2436,7 @@ void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
+int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
@@ -2449,7 +2507,7 @@ static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
}
/* Some GGTT VM helpers */
-#define obj_to_ggtt(obj) \
+#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
@@ -2458,21 +2516,30 @@ static inline bool i915_is_ggtt(struct i915_address_space *vm)
return vm == ggtt;
}
+static inline struct i915_hw_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+ WARN_ON(i915_is_ggtt(vm));
+
+ return container_of(vm, struct i915_hw_ppgtt, base);
+}
+
+
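container_of() recovers the enclosing structure by subtracting the member's
offset from the member pointer; it is only sound here because every non-GGTT
address space is embedded as the base field of a struct i915_hw_ppgtt, which
is exactly what the WARN_ON guards. In essence:

	/* Sketch: the pointer arithmetic behind container_of(). */
	#define container_of_sketch(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))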
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+ return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
}
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+ return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
}
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+ return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
}
static inline int __must_check
@@ -2480,7 +2547,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
unsigned flags)
{
- return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
+ return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+ alignment, flags | PIN_GLOBAL);
}
static inline int
@@ -2492,7 +2560,6 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
/* i915_gem_context.c */
-#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
@@ -2504,6 +2571,8 @@ int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
+struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
kref_get(&ctx->ref);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dcd8d7b42552..f1bb69377a35 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1149,16 +1149,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
- struct timespec *timeout,
+ s64 *timeout,
struct drm_i915_file_private *file_priv)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
- struct timespec before, now;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
+ s64 before, now;
int ret;
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1166,7 +1166,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
+ timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
@@ -1181,7 +1181,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(ring, seqno);
- getrawmonotonic(&before);
+ before = ktime_get_raw_ns();
for (;;) {
struct timer_list timer;
@@ -1230,7 +1230,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
destroy_timer_on_stack(&timer);
}
}
- getrawmonotonic(&now);
+ now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(ring, seqno);
if (!irq_test_in_progress)
@@ -1239,10 +1239,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
finish_wait(&ring->irq_queue, &wait);
if (timeout) {
- struct timespec sleep_time = timespec_sub(now, before);
- *timeout = timespec_sub(*timeout, sleep_time);
- if (!timespec_valid(timeout)) /* i.e. negative time remains */
- set_normalized_timespec(timeout, 0, 0);
+ s64 tres = *timeout - (now - before);
+
+ *timeout = tres < 0 ? 0 : tres;
}
return ret;
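With the timeout held as a plain s64 nanosecond count, the remaining-time
bookkeeping collapses to one subtraction clamped at zero. Worked example:

	/* Sketch: 1 ms budget, 1.2 ms actually waited. */
	s64 timeout = 1000000;			/* 1 ms in ns */
	s64 before = 0, now = 1200000;		/* ktime_get_raw_ns() samples */
	s64 tres = timeout - (now - before);	/* -200000 */
	timeout = tres < 0 ? 0 : tres;		/* report 0 ns remaining */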
@@ -2161,8 +2160,6 @@ static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
@@ -2181,19 +2178,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
-
- if (obj->fenced_gpu_access) {
- obj->last_fenced_seqno = seqno;
-
- /* Bump MRU to take account of the delayed flush */
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg;
-
- reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
- }
}
void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2229,7 +2213,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
obj->base.write_domain = 0;
obj->last_fenced_seqno = 0;
- obj->fenced_gpu_access = false;
obj->active = 0;
drm_gem_object_unreference(&obj->base);
@@ -2327,10 +2310,21 @@ int __i915_add_request(struct intel_engine_cs *ring,
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
+ struct intel_ringbuffer *ringbuf;
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring->buffer);
+ request = ring->preallocated_lazy_request;
+ if (WARN_ON(request == NULL))
+ return -ENOMEM;
+
+ if (i915.enable_execlists) {
+ struct intel_context *ctx = request->ctx;
+ ringbuf = ctx->engine[ring->id].ringbuf;
+ } else
+ ringbuf = ring->buffer;
+
+ request_start = intel_ring_get_tail(ringbuf);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2338,24 +2332,32 @@ int __i915_add_request(struct intel_engine_cs *ring,
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- ret = intel_ring_flush_all_caches(ring);
- if (ret)
- return ret;
-
- request = ring->preallocated_lazy_request;
- if (WARN_ON(request == NULL))
- return -ENOMEM;
+ if (i915.enable_execlists) {
+ ret = logical_ring_flush_all_caches(ringbuf);
+ if (ret)
+ return ret;
+ } else {
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
+ }
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring->buffer);
+ request_ring_position = intel_ring_get_tail(ringbuf);
- ret = ring->add_request(ring);
- if (ret)
- return ret;
+ if (i915.enable_execlists) {
+ ret = ring->emit_request(ringbuf);
+ if (ret)
+ return ret;
+ } else {
+ ret = ring->add_request(ring);
+ if (ret)
+ return ret;
+ }
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
@@ -2370,12 +2372,14 @@ int __i915_add_request(struct intel_engine_cs *ring,
*/
request->batch_obj = obj;
- /* Hold a reference to the current context so that we can inspect
- * it later in case a hangcheck error event fires.
- */
- request->ctx = ring->last_context;
- if (request->ctx)
- i915_gem_context_reference(request->ctx);
+ if (!i915.enable_execlists) {
+ /* Hold a reference to the current context so that we can inspect
+ * it later in case a hangcheck error event fires.
+ */
+ request->ctx = ring->last_context;
+ if (request->ctx)
+ i915_gem_context_reference(request->ctx);
+ }
request->emitted_jiffies = jiffies;
list_add_tail(&request->list, &ring->request_list);
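
The hunk above lets one submission path serve both modes by resolving the backing ring buffer from the request's context when execlists are enabled, and from the engine otherwise. A self-contained sketch of that selection (types and names invented):

#include <stdbool.h>
#include <stdio.h>

struct ringbuf { const char *name; };

struct engine {
	struct ringbuf *legacy_buf;	/* ring->buffer */
	struct ringbuf *ctx_buf;	/* per-context buffer (execlists) */
};

/* Pick the buffer the way __i915_add_request now does: the per-context
 * buffer under execlists, the engine's own buffer otherwise. */
static struct ringbuf *select_ringbuf(struct engine *e, bool enable_execlists)
{
	return enable_execlists ? e->ctx_buf : e->legacy_buf;
}

int main(void)
{
	struct ringbuf legacy = { "legacy" }, lrc = { "per-context" };
	struct engine e = { &legacy, &lrc };

	printf("%s\n", select_ringbuf(&e, true)->name);
	return 0;
}
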
@@ -2546,6 +2550,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
+ while (!list_empty(&ring->execlist_queue)) {
+ struct intel_ctx_submit_request *submit_req;
+
+ submit_req = list_first_entry(&ring->execlist_queue,
+ struct intel_ctx_submit_request,
+ execlist_link);
+ list_del(&submit_req->execlist_link);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(submit_req->ctx);
+ kfree(submit_req);
+ }
+
/* These may not have been flushed before the reset, so do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
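
The new reset path also drains the pending execlist queue: detach the head entry, drop the references it holds, free it, and repeat until the queue is empty. A stand-alone sketch of that drain pattern (list type invented for brevity):

#include <stdio.h>
#include <stdlib.h>

struct submit_req {
	struct submit_req *next;
	int id;
};

/* Drain the queue head-first; each iteration detaches one entry
 * (the list_del() step) and then releases it (the kfree() step). */
static void drain_queue(struct submit_req **head)
{
	while (*head) {
		struct submit_req *req = *head;

		*head = req->next;
		printf("dropping request %d\n", req->id);
		free(req);
	}
}

int main(void)
{
	struct submit_req *q = NULL;

	for (int i = 0; i < 3; i++) {
		struct submit_req *r = malloc(sizeof(*r));
		r->id = i;
		r->next = q;
		q = r;
	}
	drain_queue(&q);
	return 0;
}
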
@@ -2630,6 +2646,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
+ struct intel_ringbuffer *ringbuf;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
@@ -2639,12 +2656,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
break;
trace_i915_gem_request_retire(ring, request->seqno);
+
+ /* This is one of the few common intersection points
+ * between legacy ringbuffer submission and execlists:
+ * we need to tell them apart in order to find the correct
+ * ringbuffer to which the request belongs.
+ */
+ if (i915.enable_execlists) {
+ struct intel_context *ctx = request->ctx;
+ ringbuf = ctx->engine[ring->id].ringbuf;
+ } else
+ ringbuf = ring->buffer;
+
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of the tail of the request to update the last known position
* of the GPU head.
*/
- ring->buffer->last_retired_head = request->tail;
+ ringbuf->last_retired_head = request->tail;
i915_gem_free_request(request);
}
@@ -2757,16 +2786,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *ring = NULL;
- struct timespec timeout_stack, *timeout = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret = 0;
- if (args->timeout_ns >= 0) {
- timeout_stack = ns_to_timespec(args->timeout_ns);
- timeout = &timeout_stack;
- }
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -2791,9 +2814,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto out;
/* Do this after OLR check to make sure we make forward progress polling
- * on this IOCTL with a 0 timeout (like busy ioctl)
+ * on this IOCTL with a timeout <=0 (like busy ioctl)
*/
- if (!args->timeout_ns) {
+ if (args->timeout_ns <= 0) {
ret = -ETIME;
goto out;
}
@@ -2802,10 +2825,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
- if (timeout)
- args->timeout_ns = timespec_to_ns(timeout);
- return ret;
+ return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+ file->driver_priv);
out:
drm_gem_object_unreference(&obj->base);
@@ -2928,9 +2949,8 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->unbind_vma(vma);
list_del_init(&vma->mm_list);
- /* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
- obj->map_and_fenceable = true;
+ obj->map_and_fenceable = false;
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@@ -3175,7 +3195,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
obj->last_fenced_seqno = 0;
}
- obj->fenced_gpu_access = false;
return 0;
}
@@ -3282,6 +3301,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
}
} else if (enable) {
+ if (WARN_ON(!obj->map_and_fenceable))
+ return -EINVAL;
+
reg = i915_find_fence_reg(dev);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -3592,11 +3614,12 @@ int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (!i915_gem_obj_bound_any(obj))
+ if (vma == NULL)
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3638,13 +3661,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
- if (i915_gem_object_is_inactive(obj)) {
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
- if (vma)
- list_move_tail(&vma->mm_list,
- &dev_priv->gtt.base.inactive_list);
-
- }
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&vma->mm_list,
+ &dev_priv->gtt.base.inactive_list);
return 0;
}
@@ -3808,9 +3827,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- if (list_empty(&obj->vma_list))
- return false;
-
vma = i915_gem_obj_to_ggtt(obj);
if (!vma)
return false;
@@ -4337,8 +4353,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
- /* Avoid an unnecessary call to unbind on the first bind. */
- obj->map_and_fenceable = true;
i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}
@@ -4499,12 +4513,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
void i915_gem_vma_destroy(struct i915_vma *vma)
{
+ struct i915_address_space *vm = NULL;
WARN_ON(vma->node.allocated);
/* Keep the vma as a placeholder in the execbuffer reservation lists */
if (!list_empty(&vma->exec_list))
return;
+ vm = vma->vm;
+
+ if (!i915_is_ggtt(vm))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+
list_del(&vma->vma_link);
kfree(vma);
@@ -4518,7 +4538,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
int i;
for_each_ring(ring, dev_priv, i)
- intel_stop_ring_buffer(ring);
+ dev_priv->gt.stop_ring(ring);
}
int
@@ -4554,7 +4574,7 @@ i915_gem_suspend(struct drm_device *dev)
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ flush_delayed_work(&dev_priv->mm.idle_work);
return 0;
@@ -4635,7 +4655,7 @@ intel_enable_blt(struct drm_device *dev)
return true;
}
-static int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_rings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -4718,7 +4738,7 @@ i915_gem_init_hw(struct drm_device *dev)
i915_gem_init_swizzling(dev);
- ret = i915_gem_init_rings(dev);
+ ret = dev_priv->gt.init_rings(dev);
if (ret)
return ret;
@@ -4736,6 +4756,14 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
+ }
+
+ ret = i915_ppgtt_init_hw(dev);
+ if (ret && ret != -EIO) {
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
+ i915_gem_cleanup_ringbuffer(dev);
}
return ret;
@@ -4746,6 +4774,9 @@ int i915_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+ i915.enable_execlists);
+
mutex_lock(&dev->struct_mutex);
if (IS_VALLEYVIEW(dev)) {
@@ -4756,7 +4787,24 @@ int i915_gem_init(struct drm_device *dev)
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
- i915_gem_init_userptr(dev);
+ if (!i915.enable_execlists) {
+ dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+ dev_priv->gt.init_rings = i915_gem_init_rings;
+ dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+ dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+ } else {
+ dev_priv->gt.do_execbuf = intel_execlists_submission;
+ dev_priv->gt.init_rings = intel_logical_rings_init;
+ dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+ dev_priv->gt.stop_ring = intel_logical_ring_stop;
+ }
+
+ ret = i915_gem_init_userptr(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
i915_gem_init_global_gtt(dev);
ret = i915_gem_context_init(dev);
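
The mode selection above is a classic init-time dispatch table: the branch on i915.enable_execlists happens once, and the rest of the driver calls through dev_priv->gt without caring which backend is active. A minimal sketch of the idea (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct gt_ops {
	void (*init_rings)(void);
	void (*stop_ring)(void);
};

static void legacy_init(void)  { puts("legacy ring init"); }
static void legacy_stop(void)  { puts("legacy ring stop"); }
static void logical_init(void) { puts("logical (execlists) init"); }
static void logical_stop(void) { puts("logical (execlists) stop"); }

static const struct gt_ops legacy_ops  = { legacy_init,  legacy_stop  };
static const struct gt_ops logical_ops = { logical_init, logical_stop };

int main(void)
{
	bool enable_execlists = true;
	/* Chosen once at init; callers never branch on the mode again. */
	const struct gt_ops *gt = enable_execlists ? &logical_ops : &legacy_ops;

	gt->init_rings();
	gt->stop_ring();
	return 0;
}
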
@@ -4791,7 +4839,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
int i;
for_each_ring(ring, dev_priv, i)
- intel_cleanup_ring_buffer(ring);
+ dev_priv->gt.cleanup_ring(ring);
}
int
@@ -5103,9 +5151,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- if (!dev_priv->mm.aliasing_ppgtt ||
- vm == &dev_priv->mm.aliasing_ppgtt->base)
- vm = &dev_priv->gtt.base;
+ WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
@@ -5146,9 +5192,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- if (!dev_priv->mm.aliasing_ppgtt ||
- vm == &dev_priv->mm.aliasing_ppgtt->base)
- vm = &dev_priv->gtt.base;
+ WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
BUG_ON(list_empty(&o->vma_list));
@@ -5253,14 +5297,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- /* This WARN has probably outlived its usefulness (callers already
- * WARN if they don't find the GGTT vma they expect). When removing,
- * remember to remove the pre-check in is_pin_display() as well */
- if (WARN_ON(list_empty(&obj->vma_list)))
- return NULL;
-
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
- if (vma->vm != obj_to_ggtt(obj))
+ if (vma->vm != i915_obj_to_ggtt(obj))
return NULL;
return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3b99390e467a..9683e62ec61a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,50 +96,6 @@
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
-static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
-{
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &ppgtt->base;
-
- if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
- (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
- ppgtt->base.cleanup(&ppgtt->base);
- return;
- }
-
- /*
- * Make sure vmas are unbound before we take down the drm_mm
- *
- * FIXME: Proper refcounting should take care of this, this shouldn't be
- * needed at all.
- */
- if (!list_empty(&vm->active_list)) {
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &vm->active_list, mm_list)
- if (WARN_ON(list_empty(&vma->vma_link) ||
- list_is_singular(&vma->vma_link)))
- break;
-
- i915_gem_evict_vm(&ppgtt->base, true);
- } else {
- i915_gem_retire_requests(dev);
- i915_gem_evict_vm(&ppgtt->base, false);
- }
-
- ppgtt->base.cleanup(&ppgtt->base);
-}
-
-static void ppgtt_release(struct kref *kref)
-{
- struct i915_hw_ppgtt *ppgtt =
- container_of(kref, struct i915_hw_ppgtt, ref);
-
- do_ppgtt_cleanup(ppgtt);
- kfree(ppgtt);
-}
-
static size_t get_context_alignment(struct drm_device *dev)
{
if (IS_GEN6(dev))
@@ -179,24 +135,20 @@ static int get_context_size(struct drm_device *dev)
void i915_gem_context_free(struct kref *ctx_ref)
{
struct intel_context *ctx = container_of(ctx_ref,
- typeof(*ctx), ref);
- struct i915_hw_ppgtt *ppgtt = NULL;
+ typeof(*ctx), ref);
- if (ctx->legacy_hw_ctx.rcs_state) {
- /* We refcount even the aliasing PPGTT to keep the code symmetric */
- if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
- ppgtt = ctx_to_ppgtt(ctx);
- }
+ if (i915.enable_execlists)
+ intel_lr_context_free(ctx);
+
+ i915_ppgtt_put(ctx->ppgtt);
- if (ppgtt)
- kref_put(&ppgtt->ref, ppgtt_release);
if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
kfree(ctx);
}
-static struct drm_i915_gem_object *
+struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
struct drm_i915_gem_object *obj;
@@ -226,29 +178,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
-static struct i915_hw_ppgtt *
-create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
-{
- struct i915_hw_ppgtt *ppgtt;
- int ret;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
-
- ret = i915_gem_init_ppgtt(dev, ppgtt);
- if (ret) {
- kfree(ppgtt);
- return ERR_PTR(ret);
- }
-
- ppgtt->ctx = ctx;
- return ppgtt;
-}
-
static struct intel_context *
__create_hw_context(struct drm_device *dev,
- struct drm_i915_file_private *file_priv)
+ struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
@@ -301,11 +233,9 @@ err_out:
*/
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
- struct drm_i915_file_private *file_priv,
- bool create_vm)
+ struct drm_i915_file_private *file_priv)
{
const bool is_global_default_ctx = file_priv == NULL;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int ret = 0;
@@ -331,34 +261,18 @@ i915_gem_create_context(struct drm_device *dev,
}
}
- if (create_vm) {
- struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+ if (USES_FULL_PPGTT(dev)) {
+ struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
if (IS_ERR_OR_NULL(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
ret = PTR_ERR(ppgtt);
goto err_unpin;
- } else
- ctx->vm = &ppgtt->base;
-
- /* This case is reserved for the global default context and
- * should only happen once. */
- if (is_global_default_ctx) {
- if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
- ret = -EEXIST;
- goto err_unpin;
- }
-
- dev_priv->mm.aliasing_ppgtt = ppgtt;
}
- } else if (USES_PPGTT(dev)) {
- /* For platforms which only have aliasing PPGTT, we fake the
- * address space and refcounting. */
- ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
- kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
- } else
- ctx->vm = &dev_priv->gtt.base;
+
+ ctx->ppgtt = ppgtt;
+ }
return ctx;
@@ -417,7 +331,11 @@ int i915_gem_context_init(struct drm_device *dev)
if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
- if (HAS_HW_CONTEXTS(dev)) {
+ if (i915.enable_execlists) {
+ /* NB: intentionally left blank. We will allocate our own
+ * backing objects as we need them, thank you very much */
+ dev_priv->hw_context_size = 0;
+ } else if (HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
@@ -426,18 +344,23 @@ int i915_gem_context_init(struct drm_device *dev)
}
}
- ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+ ctx = i915_gem_create_context(dev, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
return PTR_ERR(ctx);
}
- /* NB: RCS will hold a ref for all rings */
- for (i = 0; i < I915_NUM_RINGS; i++)
- dev_priv->ring[i].default_context = ctx;
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
- DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
+ /* NB: RCS will hold a ref for all rings */
+ ring->default_context = ctx;
+ }
+
+ DRM_DEBUG_DRIVER("%s context support initialized\n",
+ i915.enable_execlists ? "LR" :
+ dev_priv->hw_context_size ? "HW" : "fake");
return 0;
}
@@ -489,13 +412,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
struct intel_engine_cs *ring;
int ret, i;
- /* This is the only place the aliasing PPGTT gets enabled, which means
- * it has to happen before we bail on reset */
- if (dev_priv->mm.aliasing_ppgtt) {
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- ppgtt->enable(ppgtt);
- }
-
/* FIXME: We should make this work, even in reset */
if (i915_reset_in_progress(&dev_priv->gpu_error))
return 0;
@@ -527,7 +443,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex);
- ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx)) {
@@ -614,7 +530,6 @@ static int do_switch(struct intel_engine_cs *ring,
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
- struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
bool uninitialized = false;
int ret, i;
@@ -642,8 +557,8 @@ static int do_switch(struct intel_engine_cs *ring,
*/
from = ring->last_context;
- if (USES_FULL_PPGTT(ring->dev)) {
- ret = ppgtt->switch_mm(ppgtt, ring, false);
+ if (to->ppgtt) {
+ ret = to->ppgtt->switch_mm(to->ppgtt, ring, false);
if (ret)
goto unpin_out;
}
@@ -766,9 +681,9 @@ int i915_switch_context(struct intel_engine_cs *ring,
return do_switch(ring, to);
}
-static bool hw_context_enabled(struct drm_device *dev)
+static bool contexts_enabled(struct drm_device *dev)
{
- return to_i915(dev)->hw_context_size;
+ return i915.enable_execlists || to_i915(dev)->hw_context_size;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -779,14 +694,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
- if (!hw_context_enabled(dev))
+ if (!contexts_enabled(dev))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 580aa42443ed..82a1f4b57778 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -237,7 +237,8 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
+ return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
+ NULL);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2dd19da6b4b3..1a0611bb576b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,7 @@
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
#define BATCH_OFFSET_BIAS (256*1024)
@@ -94,7 +95,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
struct i915_address_space *vm,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret;
@@ -129,20 +129,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
i = 0;
while (!list_empty(&objects)) {
struct i915_vma *vma;
- struct i915_address_space *bind_vm = vm;
-
- if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
- USES_FULL_PPGTT(vm->dev)) {
- ret = -EINVAL;
- goto err;
- }
-
- /* If we have secure dispatch, or the userspace assures us that
- * they know what they're doing, use the GGTT VM.
- */
- if (((args->flags & I915_EXEC_SECURE) &&
- (i == (args->buffer_count - 1))))
- bind_vm = &dev_priv->gtt.base;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
@@ -156,7 +142,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
@@ -307,7 +293,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t delta = reloc->delta + target_offset;
- uint32_t __iomem *reloc_entry;
+ uint64_t offset;
void __iomem *reloc_page;
int ret;
@@ -320,25 +306,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
return ret;
/* Map the page containing the relocation we're going to perform. */
- reloc->offset += i915_gem_obj_ggtt_offset(obj);
+ offset = i915_gem_obj_ggtt_offset(obj);
+ offset += reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- reloc->offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + offset_in_page(reloc->offset));
- iowrite32(lower_32_bits(delta), reloc_entry);
+ offset & PAGE_MASK);
+ iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
if (INTEL_INFO(dev)->gen >= 8) {
- reloc_entry += 1;
+ offset += sizeof(uint32_t);
- if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+ if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
- reloc_page = io_mapping_map_atomic_wc(
- dev_priv->gtt.mappable,
- reloc->offset + sizeof(uint32_t));
- reloc_entry = reloc_page;
+ reloc_page =
+ io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ offset);
}
- iowrite32(upper_32_bits(delta), reloc_entry);
+ iowrite32(upper_32_bits(delta),
+ reloc_page + offset_in_page(offset));
}
io_mapping_unmap_atomic(reloc_page);
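
On gen8 the relocation value is 64 bits wide, so the rewritten helper issues two 32-bit writes and remaps the atomic mapping if the second dword rolls onto a new page. A user-space sketch of that split write (assumes a little-endian host, as the iowrite32() pair does; the buffer stands in for the mapped aperture):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

static void write_reloc64(uint8_t *aperture, uint64_t offset, uint64_t delta)
{
	/* Low dword first, mirroring iowrite32(lower_32_bits(delta), ...). */
	memcpy(aperture + offset, &delta, 4);

	offset += 4;
	if ((offset & (PAGE_SIZE - 1)) == 0)	/* crossed into a new page */
		printf("remap atomic mapping at %llu\n",
		       (unsigned long long)offset);

	/* High dword; little-endian layout puts it 4 bytes into delta. */
	memcpy(aperture + offset, (uint8_t *)&delta + 4, 4);
}

int main(void)
{
	static uint8_t aperture[2 * PAGE_SIZE];

	write_reloc64(aperture, PAGE_SIZE - 4, 0x1122334455667788ull);
	return 0;
}
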
@@ -535,34 +520,18 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
}
static int
-need_reloc_mappable(struct i915_vma *vma)
-{
- struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
- i915_is_ggtt(vma->vm);
-}
-
-static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
- bool need_fence;
uint64_t flags;
int ret;
flags = 0;
-
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
- if (need_fence || need_reloc_mappable(vma))
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
flags |= PIN_MAPPABLE;
-
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -574,17 +543,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
entry->flags |= __EXEC_OBJECT_HAS_PIN;
- if (has_fenced_gpu_access) {
- if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- ret = i915_gem_object_get_fence(obj);
- if (ret)
- return ret;
-
- if (i915_gem_object_pin_fence(obj))
- entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ return ret;
- obj->pending_fenced_gpu_access = true;
- }
+ if (i915_gem_object_pin_fence(obj))
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
}
if (entry->offset != vma->node.start) {
@@ -601,26 +566,40 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
}
static bool
-eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+need_reloc_mappable(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- struct drm_i915_gem_object *obj = vma->obj;
- bool need_fence, need_mappable;
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(vma);
+ if (entry->relocation_count == 0)
+ return false;
+
+ if (!i915_is_ggtt(vma->vm))
+ return false;
+
+ /* See also use_cpu_reloc() */
+ if (HAS_LLC(vma->obj->base.dev))
+ return false;
- WARN_ON((need_mappable || need_fence) &&
+ if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return false;
+
+ return true;
+}
+
+static bool
+eb_vma_misplaced(struct i915_vma *vma)
+{
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_is_ggtt(vma->vm));
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
return true;
- if (need_mappable && !obj->map_and_fenceable)
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
return true;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
@@ -642,9 +621,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
- if (list_empty(vmas))
- return 0;
-
i915_gem_retire_requests_ring(ring);
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -658,20 +634,21 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
obj = vma->obj;
entry = vma->exec_entry;
+ if (!has_fenced_gpu_access)
+ entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
- has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
- if (need_mappable)
+ if (need_mappable) {
+ entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
list_move(&vma->exec_list, &ordered_vmas);
- else
+ } else
list_move_tail(&vma->exec_list, &ordered_vmas);
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
- obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_vmas, vmas);
@@ -696,7 +673,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
if (!drm_mm_node_allocated(&vma->node))
continue;
- if (eb_vma_misplaced(vma, has_fenced_gpu_access))
+ if (eb_vma_misplaced(vma))
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -744,9 +721,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
int i, total, ret;
unsigned count = args->buffer_count;
- if (WARN_ON(list_empty(&eb->vmas)))
- return 0;
-
vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
/* We may process another execbuffer during the unlock... */
@@ -890,18 +864,24 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
}
static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+validate_exec_list(struct drm_device *dev,
+ struct drm_i915_gem_exec_object2 *exec,
int count)
{
- int i;
unsigned relocs_total = 0;
unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+ unsigned invalid_flags;
+ int i;
+
+ invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+ if (USES_FULL_PPGTT(dev))
+ invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
for (i = 0; i < count; i++) {
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
int length; /* limited by fault_in_pages_readable() */
- if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+ if (exec[i].flags & invalid_flags)
return -EINVAL;
/* First check for malicious input causing overflow in
@@ -951,16 +931,26 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO);
}
+ if (i915.enable_execlists && !ctx->engine[ring->id].state) {
+ int ret = intel_lr_context_deferred_create(ctx, ring);
+ if (ret) {
+ DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
+ return ERR_PTR(ret);
+ }
+ }
+
return ctx;
}
-static void
+void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
{
+ u32 seqno = intel_ring_get_seqno(ring);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
@@ -969,24 +959,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
- obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = intel_ring_get_seqno(ring);
+ obj->last_write_seqno = seqno;
intel_fb_obj_invalidate(obj, ring);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ obj->last_fenced_seqno = seqno;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+ &dev_priv->mm.fence_list);
+ }
+ }
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
-static void
+void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
@@ -1026,14 +1023,14 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
-static int
-legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
- struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags)
+int
+i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1254,13 +1251,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (!i915_gem_check_execbuffer(args))
return -EINVAL;
- ret = validate_exec_list(exec, args->buffer_count);
+ ret = validate_exec_list(dev, exec, args->buffer_count);
if (ret)
return ret;
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
- if (!drm_is_master(file) || !capable(CAP_SYS_ADMIN))
+ if (!file->is_master || !capable(CAP_SYS_ADMIN))
return -EPERM;
flags |= I915_DISPATCH_SECURE;
@@ -1318,8 +1315,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_context_reference(ctx);
- vm = ctx->vm;
- if (!USES_FULL_PPGTT(dev))
+ if (ctx->ppgtt)
+ vm = &ctx->ppgtt->base;
+ else
vm = &dev_priv->gtt.base;
eb = eb_create(args);
@@ -1369,7 +1367,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_parse_cmds(ring,
batch_obj,
args->batch_start_offset,
- drm_is_master(file));
+ file->is_master);
if (ret)
goto err;
@@ -1386,25 +1384,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
- if (flags & I915_DISPATCH_SECURE &&
- !batch_obj->has_global_gtt_mapping) {
- /* When we have multiple VMs, we'll need to make sure that we
- * allocate space first */
- struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
- BUG_ON(!vma);
- vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
- }
+ if (flags & I915_DISPATCH_SECURE) {
+ /*
+ * So at first glance it looks freaky that we pin the batch here
+ * outside of the reservation loop. But:
+ * - The batch is already pinned into the relevant ppgtt, so we
+ * already have the backing storage fully allocated.
+ * - No other BO uses the global gtt (well contexts, but meh),
+ * so we don't really have issues with multiple objects not
+ * fitting due to fragmentation.
+ * So this is actually safe.
+ */
+ ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
+ if (ret)
+ goto err;
- if (flags & I915_DISPATCH_SECURE)
exec_start += i915_gem_obj_ggtt_offset(batch_obj);
- else
+ } else
exec_start += i915_gem_obj_offset(batch_obj, vm);
- ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
- args, &eb->vmas, batch_obj, exec_start, flags);
- if (ret)
- goto err;
+ ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
+ &eb->vmas, batch_obj, exec_start, flags);
+ /*
+ * FIXME: We crucially rely upon the active tracking for the (ppgtt)
+ * batch vma for correctness. To make this less ugly and less fragile,
+ * this needs to be adjusted to also track the ggtt batch vma properly as
+ * active.
+ */
+ if (flags & I915_DISPATCH_SECURE)
+ i915_gem_object_ggtt_unpin(batch_obj);
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5188936bca0a..4db237065610 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -33,17 +33,6 @@
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
-bool intel_enable_ppgtt(struct drm_device *dev, bool full)
-{
- if (i915.enable_ppgtt == 0)
- return false;
-
- if (i915.enable_ppgtt == 1 && full)
- return false;
-
- return true;
-}
-
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
@@ -78,7 +67,6 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
-static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
@@ -403,9 +391,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- list_del(&vm->global_link);
- drm_mm_takedown(&vm->mm);
-
gen8_ppgtt_unmap_pages(ppgtt);
gen8_ppgtt_free(ppgtt);
}
@@ -615,7 +600,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
kunmap_atomic(pd_vaddr);
}
- ppgtt->enable = gen8_ppgtt_enable;
ppgtt->switch_mm = gen8_mm_switch;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
@@ -836,39 +820,26 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_enable(struct drm_device *dev)
{
- struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- int j, ret;
+ int j;
+
+ /* In the case of execlists, PPGTT is enabled by the context descriptor
+ * and the PDPs are contained within the context itself. We don't
+ * need to do anything here. */
+ if (i915.enable_execlists)
+ return;
for_each_ring(ring, dev_priv, j) {
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- /* We promise to do a switch later with FULL PPGTT. If this is
- * aliasing, this is the one and only switch we'll do */
- if (USES_FULL_PPGTT(dev))
- continue;
-
- ret = ppgtt->switch_mm(ppgtt, ring, true);
- if (ret)
- goto err_out;
}
-
- return 0;
-
-err_out:
- for_each_ring(ring, dev_priv, j)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
- return ret;
}
-static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+static void gen7_ppgtt_enable(struct drm_device *dev)
{
- struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t ecochk, ecobits;
@@ -887,31 +858,16 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
I915_WRITE(GAM_ECOCHK, ecochk);
for_each_ring(ring, dev_priv, i) {
- int ret;
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- /* We promise to do a switch later with FULL PPGTT. If this is
- * aliasing, this is the one and only switch we'll do */
- if (USES_FULL_PPGTT(dev))
- continue;
-
- ret = ppgtt->switch_mm(ppgtt, ring, true);
- if (ret)
- return ret;
}
-
- return 0;
}
-static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_enable(struct drm_device *dev)
{
- struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
uint32_t ecochk, gab_ctl, ecobits;
- int i;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
@@ -924,14 +880,6 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- for_each_ring(ring, dev_priv, i) {
- int ret = ppgtt->switch_mm(ppgtt, ring, true);
- if (ret)
- return ret;
- }
-
- return 0;
}
/* PPGTT support for Sandybridge/Gen6 and later */
@@ -1029,8 +977,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- list_del(&vm->global_link);
- drm_mm_takedown(&ppgtt->base.mm);
drm_mm_remove_node(&ppgtt->node);
gen6_ppgtt_unmap_pages(ppgtt);
@@ -1151,13 +1097,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
if (IS_GEN6(dev)) {
- ppgtt->enable = gen6_ppgtt_enable;
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
- ppgtt->enable = gen7_ppgtt_enable;
ppgtt->switch_mm = hsw_mm_switch;
} else if (IS_GEN7(dev)) {
- ppgtt->enable = gen7_ppgtt_enable;
ppgtt->switch_mm = gen7_mm_switch;
} else
BUG();
@@ -1188,39 +1131,108 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
+ gen6_write_pdes(ppgtt);
+ DRM_DEBUG("Adding PPGTT at offset %x\n",
+ ppgtt->pd_offset << 10);
+
return 0;
}
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
ppgtt->base.dev = dev;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
- ret = gen6_ppgtt_init(ppgtt);
+ return gen6_ppgtt_init(ppgtt);
else if (IS_GEN8(dev))
- ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
+ return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
+}
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
- if (!ret) {
- struct drm_i915_private *dev_priv = dev->dev_private;
+ ret = __hw_ppgtt_init(dev, ppgtt);
+ if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
i915_init_vm(dev_priv, &ppgtt->base);
- if (INTEL_INFO(dev)->gen < 8) {
- gen6_write_pdes(ppgtt);
- DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd_offset << 10);
+ }
+
+ return ret;
+}
+
+int i915_ppgtt_init_hw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int i, ret = 0;
+
+ if (!USES_PPGTT(dev))
+ return 0;
+
+ if (IS_GEN6(dev))
+ gen6_ppgtt_enable(dev);
+ else if (IS_GEN7(dev))
+ gen7_ppgtt_enable(dev);
+ else if (INTEL_INFO(dev)->gen >= 8)
+ gen8_ppgtt_enable(dev);
+ else
+ WARN_ON(1);
+
+ if (ppgtt) {
+ for_each_ring(ring, dev_priv, i) {
+ ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret != 0)
+ return ret;
}
}
return ret;
}
+struct i915_hw_ppgtt *
+i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int ret;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = i915_ppgtt_init(dev, ppgtt);
+ if (ret) {
+ kfree(ppgtt);
+ return ERR_PTR(ret);
+ }
+
+ ppgtt->file_priv = fpriv;
+
+ return ppgtt;
+}
+
+void i915_ppgtt_release(struct kref *kref)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(kref, struct i915_hw_ppgtt, ref);
+
+ /* vmas should already be unbound */
+ WARN_ON(!list_empty(&ppgtt->base.active_list));
+ WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+
+ list_del(&ppgtt->base.global_link);
+ drm_mm_takedown(&ppgtt->base.mm);
+
+ ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
+}
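
i915_ppgtt_release() is the kref release callback behind the new get/put helpers: the last reference dropped tears the address space down and frees it. A user-space sketch of the same lifetime pattern (plain integer refcount instead of a real kref):

#include <stdio.h>
#include <stdlib.h>

struct ppgtt {
	int refcount;
};

static void ppgtt_release(struct ppgtt *p)
{
	puts("releasing ppgtt");	/* cleanup + kfree in the driver */
	free(p);
}

/* Both helpers tolerate NULL, like the i915_ppgtt_get/put inlines. */
static void ppgtt_get(struct ppgtt *p)
{
	if (p)
		p->refcount++;
}

static void ppgtt_put(struct ppgtt *p)
{
	if (p && --p->refcount == 0)
		ppgtt_release(p);
}

int main(void)
{
	struct ppgtt *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->refcount = 1;	/* creator's reference */
	ppgtt_get(p);		/* e.g. a vma takes a reference */
	ppgtt_put(p);		/* vma destroyed */
	ppgtt_put(p);		/* context freed: release runs */
	return 0;
}
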
static void
ppgtt_bind_vma(struct i915_vma *vma,
@@ -1415,7 +1427,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
- dma_addr_t addr = 0;
+ dma_addr_t addr = 0; /* shut up gcc */
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
@@ -1461,7 +1473,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
@@ -1475,9 +1487,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
- if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level, true, flags));
+ if (i != 0) {
+ unsigned long gtt = readl(&gtt_entries[i-1]);
+ WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
+ }
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -1674,10 +1687,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
}
}
-void i915_gem_setup_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
+int i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
@@ -1693,6 +1706,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
+ int ret;
BUG_ON(mappable_end > end);
@@ -1704,14 +1718,16 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
- int ret;
+
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
- if (ret)
- DRM_DEBUG_KMS("Reservation failed\n");
+ if (ret) {
+ DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
+ return ret;
+ }
obj->has_global_gtt_mapping = 1;
}
@@ -1728,6 +1744,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
/* And finally clear the reserved guard page */
ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+
+ if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return -ENOMEM;
+
+ ret = __hw_ppgtt_init(dev, ppgtt);
+ if (ret != 0)
+ return ret;
+
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+ }
+
+ return 0;
}
void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1741,6 +1773,25 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
+void i915_global_gtt_cleanup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
+
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ ppgtt->base.cleanup(&ppgtt->base);
+ }
+
+ if (drm_mm_initialized(&vm->mm)) {
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+ }
+
+ vm->cleanup(vm);
+}
+
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2009,10 +2060,6 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
- if (drm_mm_initialized(&vm->mm)) {
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
- }
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
@@ -2045,10 +2092,6 @@ static int i915_gmch_probe(struct drm_device *dev,
static void i915_gmch_remove(struct i915_address_space *vm)
{
- if (drm_mm_initialized(&vm->mm)) {
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
- }
intel_gmch_remove();
}
@@ -2163,5 +2206,8 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
if (!vma)
vma = __i915_gem_vma_create(obj, vm);
+ if (!i915_is_ggtt(vm))
+ i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+
return vma;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8d6f7c18c404..6280648d4805 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
+struct drm_i915_file_private;
+
typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
@@ -258,7 +260,7 @@ struct i915_hw_ppgtt {
dma_addr_t *gen8_pt_dma_addr[4];
};
- struct intel_context *ctx;
+ struct drm_i915_file_private *file_priv;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
@@ -269,11 +271,26 @@ struct i915_hw_ppgtt {
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
- unsigned long mappable_end, unsigned long end);
-
-bool intel_enable_ppgtt(struct drm_device *dev, bool full);
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
+void i915_global_gtt_cleanup(struct drm_device *dev);
+
+
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+int i915_ppgtt_init_hw(struct drm_device *dev);
+void i915_ppgtt_release(struct kref *kref);
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
+ struct drm_i915_file_private *fpriv);
+static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+{
+ if (ppgtt)
+ kref_get(&ppgtt->ref);
+}
+static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
+{
+ if (ppgtt)
+ kref_put(&ppgtt->ref, i915_ppgtt_release);
+}
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cb150e8b4336..7e623bf097a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -376,7 +376,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (ret == 0) {
obj->fence_dirty =
- obj->fenced_gpu_access ||
+ obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0b3f69439451..35e70d5d6282 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -192,10 +192,10 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
struct drm_i915_error_buffer *err,
int count)
{
- err_printf(m, "%s [%d]:\n", name, count);
+ err_printf(m, " %s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x %8u %02x %02x %x %x",
+ err_printf(m, " %08x %8u %02x %02x %x %x",
err->gtt_offset,
err->size,
err->read_domains,
@@ -229,6 +229,8 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
return "wait";
case HANGCHECK_ACTIVE:
return "active";
+ case HANGCHECK_ACTIVE_LOOP:
+ return "active (loop)";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
@@ -359,6 +361,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ for (i = 0; i < 4; i++)
+ err_printf(m, "GTIER gt %d: 0x%08x\n", i,
+ error->gtier[i]);
+ } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+ err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
@@ -385,15 +393,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
i915_ring_error_state(m, dev, &error->ring[i]);
}
- if (error->active_bo)
+ for (i = 0; i < error->vm_count; i++) {
+ err_printf(m, "vm[%d]\n", i);
+
print_error_buffers(m, "Active",
- error->active_bo[0],
- error->active_bo_count[0]);
+ error->active_bo[i],
+ error->active_bo_count[i]);
- if (error->pinned_bo)
print_error_buffers(m, "Pinned",
- error->pinned_bo[0],
- error->pinned_bo_count[0]);
+ error->pinned_bo[i],
+ error->pinned_bo_count[i]);
+ }
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
obj = error->ring[i].batchbuffer;
@@ -636,13 +646,15 @@ unwind:
(src)->base.size>>PAGE_SHIFT)
static void capture_bo(struct drm_i915_error_buffer *err,
- struct drm_i915_gem_object *obj)
+ struct i915_vma *vma)
{
+ struct drm_i915_gem_object *obj = vma->obj;
+
err->size = obj->base.size;
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
- err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
@@ -666,7 +678,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(vma, head, mm_list) {
- capture_bo(err++, vma->obj);
+ capture_bo(err++, vma);
if (++i == count)
break;
}
@@ -675,21 +687,27 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
+ int count, struct list_head *head,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
- int i = 0;
+ struct drm_i915_error_buffer * const first = err;
+ struct drm_i915_error_buffer * const last = err + count;
list_for_each_entry(obj, head, global_list) {
- if (!i915_gem_obj_is_pinned(obj))
- continue;
+ struct i915_vma *vma;
- capture_bo(err++, obj);
- if (++i == count)
+ if (err == last)
break;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm && vma->pin_count > 0) {
+ capture_bo(err++, vma);
+ break;
+ }
}
- return i;
+ return err - first;
}
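
The rewrite of capture_pinned_bo() swaps a loop counter for a bounded-copy idiom: fill until the end of the destination, then derive the count by pointer subtraction. A small sketch of that shape (predicate invented):

#include <stdio.h>

static int copy_bounded(int *dst, int count, const int *src, int n)
{
	int * const first = dst;
	int * const last = dst + count;

	for (int i = 0; i < n; i++) {
		if (dst == last)	/* destination full, stop early */
			break;
		if (src[i] > 0)		/* "pinned in this vm" stand-in */
			*dst++ = src[i];
	}
	return dst - first;	/* entries written, like err - first */
}

int main(void)
{
	int src[] = { 1, -2, 3, 4, -5, 6 };
	int dst[2];

	printf("copied %d\n", copy_bounded(dst, 2, src, 6));
	return 0;
}
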
/* Generate a semi-unique error code. The code is not meant to have meaning; the
@@ -784,7 +802,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
if (ring == to)
continue;
- signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
+ signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+ / 4;
tmp = error->semaphore_obj->pages[0];
idx = intel_ring_sync_index(ring, to);
@@ -958,6 +977,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
request = i915_gem_find_active_request(ring);
if (request) {
+ struct i915_address_space *vm;
+
+ vm = request->ctx && request->ctx->ppgtt ?
+ &request->ctx->ppgtt->base :
+ &dev_priv->gtt.base;
+
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
@@ -965,9 +990,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].batchbuffer =
i915_error_object_create(dev_priv,
request->batch_obj,
- request->ctx ?
- request->ctx->vm :
- &dev_priv->gtt.base);
+ vm);
if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
ring->scratch.obj)
@@ -1040,9 +1063,14 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
list_for_each_entry(vma, &vm->active_list, mm_list)
i++;
error->active_bo_count[ndx] = i;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (i915_gem_obj_is_pinned(obj))
- i++;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm && vma->pin_count > 0) {
+ i++;
+ break;
+ }
+ }
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
@@ -1061,7 +1089,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
error->pinned_bo_count[ndx] =
capture_pinned_bo(pinned_bo,
error->pinned_bo_count[ndx],
- &dev_priv->mm.bound_list);
+ &dev_priv->mm.bound_list, vm);
error->active_bo[ndx] = active_bo;
error->pinned_bo[ndx] = pinned_bo;
}
@@ -1082,8 +1110,25 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
GFP_ATOMIC);
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
- i915_gem_capture_vm(dev_priv, error, vm, i++);
+ if (error->active_bo == NULL ||
+ error->pinned_bo == NULL ||
+ error->active_bo_count == NULL ||
+ error->pinned_bo_count == NULL) {
+ kfree(error->active_bo);
+ kfree(error->active_bo_count);
+ kfree(error->pinned_bo);
+ kfree(error->pinned_bo_count);
+
+ error->active_bo = NULL;
+ error->active_bo_count = NULL;
+ error->pinned_bo = NULL;
+ error->pinned_bo_count = NULL;
+ } else {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_capture_vm(dev_priv, error, vm, i++);
+
+ error->vm_count = cnt;
+ }
}
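
The capture path now treats its four arrays as all-or-nothing: if any kcalloc() under GFP_ATOMIC failed, everything allocated so far is freed (kfree(NULL) is a no-op) and the capture is skipped rather than dereferencing a NULL array. A reduced sketch with two arrays (names invented):

#include <stdio.h>
#include <stdlib.h>

static int alloc_capture_arrays(size_t cnt, int **a, int **b)
{
	*a = calloc(cnt, sizeof(**a));
	*b = calloc(cnt, sizeof(**b));
	if (*a == NULL || *b == NULL) {
		free(*a);	/* free(NULL) is a no-op, like kfree */
		free(*b);
		*a = *b = NULL;
		return -1;	/* caller leaves vm_count at 0 */
	}
	return 0;
}

int main(void)
{
	int *active, *pinned;

	if (alloc_capture_arrays(4, &active, &pinned) == 0) {
		puts("capturing per-VM buffers");
		free(active);
		free(pinned);
	}
	return 0;
}
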
/* Capture all registers which don't fit into another category. */
@@ -1091,6 +1136,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
struct drm_device *dev = dev_priv->dev;
+ int i;
/* General organization
* 1. Registers specific to a single generation
@@ -1102,7 +1148,8 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(dev)) {
- error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ error->gtier[0] = I915_READ(GTIER);
+ error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ(FORCEWAKE_VLV);
}
@@ -1135,16 +1182,18 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);
- if (HAS_PCH_SPLIT(dev))
- error->ier = I915_READ(DEIER) | I915_READ(GTIER);
- else {
- if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
- else
- error->ier = I915_READ(IER);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ error->ier = I915_READ(GEN8_DE_MISC_IER);
+ for (i = 0; i < 4; i++)
+ error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+ } else if (HAS_PCH_SPLIT(dev)) {
+ error->ier = I915_READ(DEIER);
+ error->gtier[0] = I915_READ(GTIER);
+ } else if (IS_GEN2(dev)) {
+ error->ier = I915_READ16(IER);
+ } else if (!IS_VALLEYVIEW(dev)) {
+ error->ier = I915_READ(IER);
}
-
- /* 4: Everything else */
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 20dd9e233fc6..7391697c25e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (!intel_irqs_enabled(dev_priv))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
if ((dev_priv->irq_mask & mask) != mask) {
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
* some connectors */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
- mod_timer(&dev_priv->hotplug_reenable_timer,
- jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+ mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+ msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
-}
-
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1327,10 +1322,10 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
* @dev_priv: DRM device private
*
*/
-static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
u32 residency_C0_up = 0, residency_C0_down = 0;
- u8 new_delay, adj;
+ int new_delay, adj;
dev_priv->rps.ei_interrupt_count++;
@@ -1632,6 +1627,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
{
+ struct intel_engine_cs *ring;
u32 rcs, bcs, vcs;
uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
@@ -1641,12 +1637,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (tmp) {
I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
+
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
- bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ ring = &dev_priv->ring[RCS];
if (rcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(dev, ring);
+ if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
+
+ bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
+ notify_ring(dev, ring);
+ if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1656,12 +1660,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (tmp) {
I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
+
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+ ring = &dev_priv->ring[VCS];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
+ notify_ring(dev, ring);
+ if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
+
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+ ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS2]);
+ notify_ring(dev, ring);
+ if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1682,9 +1694,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (tmp) {
I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
+
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+ ring = &dev_priv->ring[VECS];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VECS]);
+ notify_ring(dev, ring);
+ if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
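
gen8_gt_irq_handler() follows a fixed sequence for each GT IIR: read, write the value back to ack, then decode two engines per register by shifting down to each engine's bit field; with execlists, every engine can raise a context-switch event (GT_CONTEXT_SWITCH_INTERRUPT, bit 8 per the i915_reg.h hunk below) in addition to the user interrupt. A runnable userspace sketch of the shift-and-test decode; the user-interrupt bit and the 0/16 engine shifts are assumed values for illustration, mirroring the RCS/BCS pair:

#include <stdint.h>
#include <stdio.h>

#define USER_IRQ	(1u << 0)	/* stand-in for GT_RENDER_USER_INTERRUPT */
#define CTX_SWITCH_IRQ	(1u << 8)	/* GT_CONTEXT_SWITCH_INTERRUPT */

static void decode_engine(const char *name, uint32_t bits)
{
	if (bits & USER_IRQ)
		printf("%s: user interrupt\n", name);
	if (bits & CTX_SWITCH_IRQ)
		printf("%s: context switch\n", name);
}

int main(void)
{
	/* one IIR register carries two engines at different shifts */
	uint32_t iir = (USER_IRQ << 0) | (CTX_SWITCH_IRQ << 16);

	decode_engine("rcs", iir >> 0);		/* assumed RCS shift */
	decode_engine("bcs", iir >> 16);	/* assumed BCS shift */
	return 0;
}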
@@ -1777,7 +1793,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
}
- DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+ DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
+ port_name(port),
+ long_hpd ? "long" : "short");
/* for long HPD pulses we want to have the digital queue happen,
but we still want HPD storm detection to function. */
if (long_hpd) {
@@ -1989,14 +2007,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
- struct intel_crtc *crtc;
-
if (!drm_handle_vblank(dev, pipe))
return false;
- crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
- wake_up(&crtc->vbl_wait);
-
return true;
}
@@ -3189,8 +3202,14 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
- if (ring->hangcheck.acthd != acthd)
- return HANGCHECK_ACTIVE;
+ if (acthd != ring->hangcheck.acthd) {
+ if (acthd > ring->hangcheck.max_acthd) {
+ ring->hangcheck.max_acthd = acthd;
+ return HANGCHECK_ACTIVE;
+ }
+
+ return HANGCHECK_ACTIVE_LOOP;
+ }
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
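
ring_stuck() now distinguishes real forward progress from a batch spinning in place: ACTHD only counts as ACTIVE while it keeps setting a new high-water mark (max_acthd); movement below that mark is classified as the new HANGCHECK_ACTIVE_LOOP state, which the later hunk scores as BUSY, and both fields are zeroed once the ring goes idle. A simplified sketch of the classification (the driver performs several further checks before declaring a hang, and updates acthd elsewhere):

#include <stdint.h>

enum hang_state { ACTIVE, ACTIVE_LOOP, MAYBE_HUNG };

struct hangcheck { uint64_t acthd, max_acthd; };

static enum hang_state classify(struct hangcheck *hc, uint64_t acthd)
{
	if (acthd != hc->acthd) {
		hc->acthd = acthd;	/* simplification: updated here */
		if (acthd > hc->max_acthd) {
			hc->max_acthd = acthd;
			return ACTIVE;		/* genuine forward progress */
		}
		return ACTIVE_LOOP;	/* moving, but inside a loop */
	}
	return MAYBE_HUNG;		/* no movement at all */
}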
@@ -3301,8 +3320,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
switch (ring->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
- break;
case HANGCHECK_ACTIVE:
+ break;
+ case HANGCHECK_ACTIVE_LOOP:
ring->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
@@ -3322,6 +3342,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
+
+ ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
}
ring->hangcheck.seqno = seqno;
@@ -3518,18 +3540,17 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_irqs, hotplug, enabled_irqs = 0;
if (HAS_PCH_IBX(dev)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
- list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
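
The open-coded list_for_each_entry() walks over dev->mode_config.encoder_list are replaced throughout by a for_each_intel_encoder(dev, encoder) helper that hides the list-head and base.head plumbing. The wrapper is defined outside this diff; a sketch of its likely shape:

/* Sketch of the iterator wrapper (the real definition lives in
 * intel_drv.h and may differ in detail): */
#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)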
@@ -3783,12 +3804,17 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
/* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
0,
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+ GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
@@ -3883,8 +3909,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
gen8_irq_reset(dev);
}
@@ -3899,8 +3923,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
- intel_hpd_irq_uninstall(dev_priv);
-
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3979,8 +4001,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
ironlake_irq_reset(dev);
}
@@ -4351,8 +4371,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- intel_hpd_irq_uninstall(dev_priv);
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4448,7 +4466,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
static void i915_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_en;
@@ -4459,7 +4476,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
- list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
@@ -4589,8 +4606,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4606,14 +4621,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
{
- struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
unsigned long irqflags;
int i;
+ intel_runtime_pm_get(dev_priv);
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
@@ -4639,6 +4658,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
}
void intel_irq_init(struct drm_device *dev)
@@ -4661,8 +4682,8 @@ void intel_irq_init(struct drm_device *dev)
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev);
- setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
- (unsigned long) dev_priv);
+ INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+ intel_hpd_irq_reenable);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
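
The storm-recovery timer becomes a delayed work item so that intel_hpd_irq_reenable() runs in process context, where it may sleep and may hold a runtime PM reference (intel_runtime_pm_get()/put()) around its register writes; a timer callback runs in softirq context and could do neither. The conversion pattern in isolation (struct and function names here are hypothetical):

struct my_dev {
	struct delayed_work reenable_work;	/* was: struct timer_list */
};

static void reenable_fn(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev,
					reenable_work.work);
	/* process context: may sleep, may take runtime PM references */
}

static void my_dev_init(struct my_dev *d)
{
	/* was: setup_timer(&d->timer, fn, (unsigned long)d); */
	INIT_DELAYED_WORK(&d->reenable_work, reenable_fn);
}

/* was: mod_timer(&d->timer, jiffies + delay);
 * now: mod_delayed_work(system_wq, &d->reenable_work, delay); */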
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 62ee8308d682..139f490d464d 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,9 +35,10 @@ struct i915_params i915 __read_mostly = {
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
+ .enable_execlists = 0,
.enable_hangcheck = true,
.enable_ppgtt = -1,
- .enable_psr = 1,
+ .enable_psr = 0,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1,
.enable_ips = 1,
@@ -118,8 +119,13 @@ MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
+module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
+MODULE_PARM_DESC(enable_execlists,
+ "Override execlists usage. "
+ "(-1=auto, 0=disabled [default], 1=enabled)");
+
module_param_named(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
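
enable_execlists arrives as a module parameter with mode 0400 (root-readable in sysfs, not writable at runtime), wired through the usual module_param_named()/MODULE_PARM_DESC() pair, while enable_psr flips to off by default. A minimal module fragment showing the same pattern; the parameter name is hypothetical:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int enable_feature;	/* 0 = disabled by default */

/* 0400: appears under /sys/module/<name>/parameters/, root-only, read-only */
module_param_named(enable_feature, enable_feature, int, 0400);
MODULE_PARM_DESC(enable_feature,
		 "Override feature usage (-1=auto, 0=disabled [default], 1=enabled)");

MODULE_LICENSE("GPL");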
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index fe5c27630e95..203062e93452 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -272,6 +272,7 @@
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
@@ -282,6 +283,7 @@
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -497,10 +499,26 @@
#define BUNIT_REG_BISOC 0x11
#define PUNIT_REG_DSPFREQ 0x36
+#define DSPFREQSTAT_SHIFT_CHV 24
+#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
+#define DSPFREQGUAR_SHIFT_CHV 8
+#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV)
#define DSPFREQSTAT_SHIFT 30
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
+#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
+#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
+#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe))
+#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe))
+#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe))
+#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16))
+#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe))
+#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe))
+#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe))
+#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
+#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
/* See the PUNIT HAS v0.8 for the below bits */
enum punit_power_well {
@@ -514,6 +532,11 @@ enum punit_power_well {
PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,
+ PUNIT_POWER_WELL_DPIO_CMN_D = 12,
+ /* FIXME: guesswork below */
+ PUNIT_POWER_WELL_DPIO_TX_D_LANES_01 = 13,
+ PUNIT_POWER_WELL_DPIO_TX_D_LANES_23 = 14,
+ PUNIT_POWER_WELL_DPIO_RX2 = 15,
PUNIT_POWER_WELL_NUM,
};
@@ -834,8 +857,8 @@ enum punit_power_well {
#define _VLV_TX_DW2_CH0 0x8288
#define _VLV_TX_DW2_CH1 0x8488
-#define DPIO_SWING_MARGIN_SHIFT 16
-#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT)
+#define DPIO_SWING_MARGIN000_SHIFT 16
+#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT)
#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
@@ -843,12 +866,16 @@ enum punit_power_well {
#define _VLV_TX_DW3_CH1 0x848c
/* The following bit for CHV phy */
#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
+#define DPIO_SWING_MARGIN101_SHIFT 16
+#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
#define _VLV_TX_DW4_CH0 0x8290
#define _VLV_TX_DW4_CH1 0x8490
#define DPIO_SWING_DEEMPH9P5_SHIFT 24
#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
+#define DPIO_SWING_DEEMPH6P0_SHIFT 16
+#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
#define _VLV_TX3_DW4_CH0 0x690
@@ -1060,6 +1087,7 @@ enum punit_power_well {
#define RING_ACTHD_UDW(base) ((base)+0x5c)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
+#define RING_HWSTAM(base) ((base)+0x98)
#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
@@ -1376,6 +1404,7 @@ enum punit_power_well {
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -1515,6 +1544,7 @@ enum punit_power_well {
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE 0x43200
#define ILK_DPFC_CONTROL 0x43208
+#define FBC_CTL_FALSE_COLOR (1<<10)
/* Bits 28:8 are reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL 0x4320c
@@ -1671,12 +1701,9 @@ enum punit_power_well {
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
-#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \
- ((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
-#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
- ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
+#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30))
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -3472,6 +3499,8 @@ enum punit_power_well {
#define DP_LINK_TRAIN_OFF (3 << 28)
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
+#define DP_LINK_TRAIN_PAT_3_CHV (1 << 14)
+#define DP_LINK_TRAIN_MASK_CHV ((3 << 28)|(1<<14))
/* CPT Link training mode */
#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
@@ -3728,7 +3757,6 @@ enum punit_power_well {
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
#define PIPE_DPST_EVENT_STATUS (1UL<<7)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
@@ -3838,73 +3866,151 @@ enum punit_power_well {
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
+/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
-#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff<<23)
-#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f<<16)
-#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f<<8)
-#define DSPFW_PLANEA_MASK (0x7f)
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
+#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
+#define DSPFW_PLANEA_SHIFT 0
+#define DSPFW_PLANEA_MASK (0x7f<<0)
+#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
-#define DSPFW_CURSORA_MASK 0x00003f00
-#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_PLANEC_MASK (0x7f)
+#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
+#define DSPFW_FBC_SR_SHIFT 28
+#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
+#define DSPFW_FBC_HPLL_SR_SHIFT 24
+#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
+#define DSPFW_SPRITEB_SHIFT (16)
+#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_CURSORA_MASK (0x3f<<8)
+#define DSPFW_PLANEC_SHIFT_OLD 0
+#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_SPRITEA_SHIFT 0
+#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1<<31)
-#define DSPFW_CURSOR_SR_SHIFT 24
+#define DSPFW_HPLL_SR_EN (1<<31)
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_SHIFT 24
#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
#define DSPFW_HPLL_CURSOR_SHIFT 16
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
-#define DSPFW_HPLL_SR_MASK (0x1ff)
-#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070)
-#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c)
+#define DSPFW_HPLL_SR_SHIFT 0
+#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
+
+/* vlv/chv */
+#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070)
+#define DSPFW_SPRITEB_WM1_SHIFT 16
+#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
+#define DSPFW_CURSORA_WM1_SHIFT 8
+#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
+#define DSPFW_SPRITEA_WM1_SHIFT 0
+#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
+#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074)
+#define DSPFW_PLANEB_WM1_SHIFT 24
+#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEA_WM1_SHIFT 16
+#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
+#define DSPFW_CURSORB_WM1_SHIFT 8
+#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSOR_SR_WM1_SHIFT 0
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
+#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078)
+#define DSPFW_SR_WM1_SHIFT 0
+#define DSPFW_SR_WM1_MASK (0x1ff<<0)
+#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c)
+#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
+#define DSPFW_SPRITED_WM1_SHIFT 24
+#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITED_SHIFT 16
+#define DSPFW_SPRITED_MASK (0xff<<16)
+#define DSPFW_SPRITEC_WM1_SHIFT 8
+#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEC_SHIFT 0
+#define DSPFW_SPRITEC_MASK (0xff<<0)
+#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
+#define DSPFW_SPRITEF_WM1_SHIFT 24
+#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITEF_SHIFT 16
+#define DSPFW_SPRITEF_MASK (0xff<<16)
+#define DSPFW_SPRITEE_WM1_SHIFT 8
+#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEE_SHIFT 0
+#define DSPFW_SPRITEE_MASK (0xff<<0)
+#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
+#define DSPFW_PLANEC_WM1_SHIFT 24
+#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEC_SHIFT 16
+#define DSPFW_PLANEC_MASK (0xff<<16)
+#define DSPFW_CURSORC_WM1_SHIFT 8
+#define DSPFW_CURSORC_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORC_SHIFT 0
+#define DSPFW_CURSORC_MASK (0x3f<<0)
+
+/* vlv/chv high order bits */
+#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
+#define DSPFW_SR_HI_SHIFT 24
+#define DSPFW_SR_HI_MASK (1<<24)
+#define DSPFW_SPRITEF_HI_SHIFT 23
+#define DSPFW_SPRITEF_HI_MASK (1<<23)
+#define DSPFW_SPRITEE_HI_SHIFT 22
+#define DSPFW_SPRITEE_HI_MASK (1<<22)
+#define DSPFW_PLANEC_HI_SHIFT 21
+#define DSPFW_PLANEC_HI_MASK (1<<21)
+#define DSPFW_SPRITED_HI_SHIFT 20
+#define DSPFW_SPRITED_HI_MASK (1<<20)
+#define DSPFW_SPRITEC_HI_SHIFT 16
+#define DSPFW_SPRITEC_HI_MASK (1<<16)
+#define DSPFW_PLANEB_HI_SHIFT 12
+#define DSPFW_PLANEB_HI_MASK (1<<12)
+#define DSPFW_SPRITEB_HI_SHIFT 8
+#define DSPFW_SPRITEB_HI_MASK (1<<8)
+#define DSPFW_SPRITEA_HI_SHIFT 4
+#define DSPFW_SPRITEA_HI_MASK (1<<4)
+#define DSPFW_PLANEA_HI_SHIFT 0
+#define DSPFW_PLANEA_HI_MASK (1<<0)
+#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
+#define DSPFW_SR_WM1_HI_SHIFT 24
+#define DSPFW_SR_WM1_HI_MASK (1<<24)
+#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
+#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
+#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
+#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
+#define DSPFW_PLANEC_WM1_HI_SHIFT 21
+#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
+#define DSPFW_SPRITED_WM1_HI_SHIFT 20
+#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
+#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
+#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
+#define DSPFW_PLANEB_WM1_HI_SHIFT 12
+#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
+#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
+#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
+#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
+#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
+#define DSPFW_PLANEA_WM1_HI_SHIFT 0
+#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
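
On VLV/CHV the plane and sprite watermark fields in DSPFW1-9 are 8 bits wide (cursors 6, SR 9), and DSPHOWM/DSPHOWM1 add one extra high bit per plane, so a 9-bit watermark value is split across two registers. A runnable check of the split and merge, using the plane A field positions from the defines above:

#include <assert.h>
#include <stdint.h>

/* Split a 9-bit plane A watermark into DSPFW1 bits 7:0
 * (DSPFW_PLANEA_MASK_VLV) plus DSPHOWM bit 0 (DSPFW_PLANEA_HI_MASK),
 * then merge it back. */
static void split_wm(uint32_t wm, uint32_t *fw_lo, uint32_t *howm_hi)
{
	*fw_lo = wm & 0xff;
	*howm_hi = (wm >> 8) & 0x1;
}

int main(void)
{
	uint32_t lo, hi;

	split_wm(0x17f, &lo, &hi);
	assert(((hi << 8) | lo) == 0x17f);
	return 0;
}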
/* drain latency register values */
#define DRAIN_LATENCY_PRECISION_32 32
-#define DRAIN_LATENCY_PRECISION_16 16
-#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
-#define DDL_CURSORA_PRECISION_32 (1<<31)
-#define DDL_CURSORA_PRECISION_16 (0<<31)
-#define DDL_CURSORA_SHIFT 24
-#define DDL_SPRITEB_PRECISION_32 (1<<23)
-#define DDL_SPRITEB_PRECISION_16 (0<<23)
-#define DDL_SPRITEB_SHIFT 16
-#define DDL_SPRITEA_PRECISION_32 (1<<15)
-#define DDL_SPRITEA_PRECISION_16 (0<<15)
-#define DDL_SPRITEA_SHIFT 8
-#define DDL_PLANEA_PRECISION_32 (1<<7)
-#define DDL_PLANEA_PRECISION_16 (0<<7)
-#define DDL_PLANEA_SHIFT 0
-
-#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
-#define DDL_CURSORB_PRECISION_32 (1<<31)
-#define DDL_CURSORB_PRECISION_16 (0<<31)
-#define DDL_CURSORB_SHIFT 24
-#define DDL_SPRITED_PRECISION_32 (1<<23)
-#define DDL_SPRITED_PRECISION_16 (0<<23)
-#define DDL_SPRITED_SHIFT 16
-#define DDL_SPRITEC_PRECISION_32 (1<<15)
-#define DDL_SPRITEC_PRECISION_16 (0<<15)
-#define DDL_SPRITEC_SHIFT 8
-#define DDL_PLANEB_PRECISION_32 (1<<7)
-#define DDL_PLANEB_PRECISION_16 (0<<7)
-#define DDL_PLANEB_SHIFT 0
-
-#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
-#define DDL_CURSORC_PRECISION_32 (1<<31)
-#define DDL_CURSORC_PRECISION_16 (0<<31)
-#define DDL_CURSORC_SHIFT 24
-#define DDL_SPRITEF_PRECISION_32 (1<<23)
-#define DDL_SPRITEF_PRECISION_16 (0<<23)
-#define DDL_SPRITEF_SHIFT 16
-#define DDL_SPRITEE_PRECISION_32 (1<<15)
-#define DDL_SPRITEE_PRECISION_16 (0<<15)
-#define DDL_SPRITEE_SHIFT 8
-#define DDL_PLANEC_PRECISION_32 (1<<7)
-#define DDL_PLANEC_PRECISION_16 (0<<7)
-#define DDL_PLANEC_SHIFT 0
+#define DRAIN_LATENCY_PRECISION_64 64
+#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
+#define DDL_CURSOR_PRECISION_64 (1<<31)
+#define DDL_CURSOR_PRECISION_32 (0<<31)
+#define DDL_CURSOR_SHIFT 24
+#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite)))
+#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
+#define DDL_PLANE_PRECISION_64 (1<<7)
+#define DDL_PLANE_PRECISION_32 (0<<7)
+#define DDL_PLANE_SHIFT 0
+#define DRAIN_LATENCY_MASK 0x7f
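
The three hand-written VLV_DDL1/2/3 blocks collapse into one VLV_DDL(pipe) address macro plus sprite-indexed field macros: each pipe's drain-latency register packs cursor, sprite 1, sprite 0 and the primary plane into 8-bit lanes, with the top bit of each lane selecting 32 vs 64 units of precision. A quick demonstration of the address and shift arithmetic (the VLV_DISPLAY_BASE value is assumed here for illustration):

#include <stdio.h>

#define VLV_DISPLAY_BASE	0x180000	/* assumed value */
#define VLV_DDL(pipe)		(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_SPRITE_SHIFT(sprite)	(8 + 8 * (sprite))

int main(void)
{
	/* pipe B sprite 1: register 0x180054, field at bits 23:16 */
	printf("reg=0x%x, shift=%d\n", VLV_DDL(1), DDL_SPRITE_SHIFT(1));
	return 0;
}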
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -4022,7 +4128,8 @@ enum punit_power_well {
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
-#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_STRIDE_SHIFT 28
+#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
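
CURSOR_STRIDE() encodes the four legacy cursor strides (256, 512, 1024, 2048 bytes) into the 2-bit field at bits 29:28: ffs(256) is 9, so ffs(x) - 9 maps the four powers of two to 0..3. A runnable check:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define CURSOR_STRIDE_SHIFT	28
#define CURSOR_STRIDE(x)	((ffs(x) - 9) << CURSOR_STRIDE_SHIFT)

int main(void)
{
	for (int s = 256; s <= 2048; s <<= 1)	/* prints fields 0, 1, 2, 3 */
		printf("stride %4d -> field %d\n",
		       s, CURSOR_STRIDE(s) >> CURSOR_STRIDE_SHIFT);
	return 0;
}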
@@ -4191,6 +4298,7 @@ enum punit_power_well {
#define DVS_YUV_ORDER_UYVY (1<<16)
#define DVS_YUV_ORDER_YVYU (2<<16)
#define DVS_YUV_ORDER_VYUY (3<<16)
+#define DVS_ROTATE_180 (1<<15)
#define DVS_DEST_KEY (1<<2)
#define DVS_TRICKLE_FEED_DISABLE (1<<14)
#define DVS_TILED (1<<10)
@@ -4261,6 +4369,7 @@ enum punit_power_well {
#define SPRITE_YUV_ORDER_UYVY (1<<16)
#define SPRITE_YUV_ORDER_YVYU (2<<16)
#define SPRITE_YUV_ORDER_VYUY (3<<16)
+#define SPRITE_ROTATE_180 (1<<15)
#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
#define SPRITE_INT_GAMMA_ENABLE (1<<13)
#define SPRITE_TILED (1<<10)
@@ -4334,6 +4443,7 @@ enum punit_power_well {
#define SP_YUV_ORDER_UYVY (1<<16)
#define SP_YUV_ORDER_YVYU (2<<16)
#define SP_YUV_ORDER_VYUY (3<<16)
+#define SP_ROTATE_180 (1<<15)
#define SP_TILED (1<<10)
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
@@ -5403,7 +5513,6 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
-#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 608ed302f24d..031c5657255d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -878,7 +878,7 @@ err:
/* error during parsing so set all pointers to null
* because of partial parsing */
- memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX);
+ memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
@@ -976,12 +976,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (bdb->version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
hdmi_level_shift = child->raw[7] & 0xF;
- if (hdmi_level_shift < 0xC) {
- DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
- port_name(port),
- hdmi_level_shift);
- info->hdmi_level_shift = hdmi_level_shift;
- }
+ DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+ port_name(port),
+ hdmi_level_shift);
+ info->hdmi_level_shift = hdmi_level_shift;
}
}
@@ -1114,8 +1112,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
- /* Recommended BSpec default: 800mV 0dB. */
- info->hdmi_level_shift = 6;
+ info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index b98667796337..905999bee2ac 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -802,7 +802,8 @@ struct mipi_config {
u16 rsvd4;
- u8 rsvd5[5];
+ u8 rsvd5;
+ u32 target_burst_mode_freq;
u32 dsi_ddr_clk;
u32 bridge_ref_clk;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2efaf8e8d9c4..e8abfce40976 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
goto out;
}
+ drm_modeset_acquire_init(&ctx, 0);
+
/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(connector, &tmp, &ctx);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
out:
intel_display_power_put(dev_priv, power_domain);
return status;
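
intel_crt_detect() now brackets load detection with an explicitly initialized acquire context: drm_modeset_acquire_init() before any modeset locks are taken through the ctx, then drm_modeset_drop_locks() and drm_modeset_acquire_fini() on the way out; intel_release_load_detect_pipe() correspondingly loses its ctx argument. The shape of the bracket in isolation:

struct drm_modeset_acquire_ctx ctx;

drm_modeset_acquire_init(&ctx, 0);

/* ... take modeset locks via &ctx, run load detection ... */

drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);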
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5db0b5552e39..02d55843c78d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -33,7 +33,7 @@
* automatically adapt to HDMI connections as well
*/
static const u32 hsw_ddi_translations_dp[] = {
- 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00FFFFFF, 0x0006000E,
0x00D75FFF, 0x0005000A,
0x00C30FFF, 0x00040006,
0x80AAAFFF, 0x000B0000,
@@ -45,7 +45,7 @@ static const u32 hsw_ddi_translations_dp[] = {
};
static const u32 hsw_ddi_translations_fdi[] = {
- 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00FFFFFF, 0x0007000E,
0x00D75FFF, 0x000F000A,
0x00C30FFF, 0x00060006,
0x00AAAFFF, 0x001E0000,
@@ -73,7 +73,7 @@ static const u32 hsw_ddi_translations_hdmi[] = {
};
static const u32 bdw_ddi_translations_edp[] = {
- 0x00FFFFFF, 0x00000012, /* eDP parameters */
+ 0x00FFFFFF, 0x00000012,
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00AAAFFF, 0x000E000A,
@@ -82,11 +82,10 @@ static const u32 bdw_ddi_translations_edp[] = {
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
- 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
static const u32 bdw_ddi_translations_dp[] = {
- 0x00FFFFFF, 0x0007000E, /* DP parameters */
+ 0x00FFFFFF, 0x0007000E,
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
0x80B2CFFF, 0x001B0002,
@@ -95,11 +94,10 @@ static const u32 bdw_ddi_translations_dp[] = {
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
- 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
static const u32 bdw_ddi_translations_fdi[] = {
- 0x00FFFFFF, 0x0001000E, /* FDI parameters */
+ 0x00FFFFFF, 0x0001000E,
0x00D75FFF, 0x0004000A,
0x00C30FFF, 0x00070006,
0x00AAAFFF, 0x000C0000,
@@ -108,7 +106,20 @@ static const u32 bdw_ddi_translations_fdi[] = {
0x00C30FFF, 0x000C0000,
0x00FFFFFF, 0x00070006,
0x00D75FFF, 0x000C0000,
- 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
+};
+
+static const u32 bdw_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff T mV diff db */
+ 0x00FFFFFF, 0x0007000E, /* 0: 400 400 0 */
+ 0x00D75FFF, 0x000E000A, /* 1: 400 600 3.5 */
+ 0x00BEFFFF, 0x00140006, /* 2: 400 800 6 */
+ 0x00FFFFFF, 0x0009000D, /* 3: 450 450 0 */
+ 0x00FFFFFF, 0x000E000A, /* 4: 600 600 0 */
+ 0x00D7FFFF, 0x00140006, /* 5: 600 800 2.5 */
+ 0x80CB2FFF, 0x001B0002, /* 6: 600 1000 4.5 */
+ 0x00FFFFFF, 0x00140006, /* 7: 800 800 0 */
+ 0x80E79FFF, 0x001B0002, /* 8: 800 1000 2 */
+ 0x80FFFFFF, 0x001B0002, /* 9: 1000 1000 0 */
};
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
@@ -145,26 +156,36 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- int i;
+ int i, n_hdmi_entries, hdmi_800mV_0dB;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const u32 *ddi_translations_fdi;
const u32 *ddi_translations_dp;
const u32 *ddi_translations_edp;
+ const u32 *ddi_translations_hdmi;
const u32 *ddi_translations;
if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
+ ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2;
+ hdmi_800mV_0dB = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
+ ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi) / 2;
+ hdmi_800mV_0dB = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
+ ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2;
+ hdmi_800mV_0dB = 7;
}
switch (port) {
@@ -193,9 +214,15 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
+
+ /* Choose a good default if VBT is badly populated */
+ if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+ hdmi_level >= n_hdmi_entries)
+ hdmi_level = hdmi_800mV_0dB;
+
/* Entry 9 is for HDMI: */
for (i = 0; i < 2; i++) {
- I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
+ I915_WRITE(reg, ddi_translations_hdmi[hdmi_level * 2 + i]);
reg += 4;
}
}
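
With per-platform HDMI translation tables in place, a VBT level shift that is missing (HDMI_LEVEL_SHIFT_UNKNOWN) or out of range falls back to the platform's 800mV/0dB entry: index 6 on Haswell, 7 on Broadwell, as selected earlier in the function. The sentinel-plus-clamp logic on its own, with a hypothetical sentinel value:

#include <stdio.h>

#define LEVEL_UNKNOWN	0xff	/* stand-in for HDMI_LEVEL_SHIFT_UNKNOWN */

/* "Choose a good default if VBT is badly populated" */
static int pick_hdmi_level(int vbt_level, int n_entries, int default_800mV_0dB)
{
	if (vbt_level == LEVEL_UNKNOWN || vbt_level >= n_entries)
		return default_800mV_0dB;
	return vbt_level;
}

int main(void)
{
	printf("%d\n", pick_hdmi_level(LEVEL_UNKNOWN, 10, 7));	/* 7 */
	printf("%d\n", pick_hdmi_level(3, 10, 7));		/* 3 */
	return 0;
}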
@@ -587,8 +614,8 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
-void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+static void hsw_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
@@ -643,9 +670,15 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ hsw_ddi_clock_get(encoder, pipe_config);
+}
+
static void
-intel_ddi_calculate_wrpll(int clock /* in Hz */,
- unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+hsw_ddi_calculate_wrpll(int clock /* in Hz */,
+ unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
uint64_t freq2k;
unsigned p, n2, r2;
@@ -708,27 +741,17 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-/*
- * Tries to find a PLL for the CRTC. If it finds, it increases the refcount and
- * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
- * steal the selected PLL. You need to call intel_ddi_pll_enable to actually
- * enable the PLL.
- */
-bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
+static bool
+hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_encoder *intel_encoder,
+ int clock)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- int type = intel_encoder->type;
- int clock = intel_crtc->config.port_clock;
-
- intel_put_shared_dpll(intel_crtc);
-
- if (type == INTEL_OUTPUT_HDMI) {
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
struct intel_shared_dpll *pll;
uint32_t val;
unsigned p, n2, r2;
- intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+ hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -749,6 +772,25 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
return true;
}
+
+/*
+ * Tries to find a *shared* PLL for the CRTC and store it in
+ * intel_crtc->ddi_pll_sel.
+ *
+ * For private DPLLs, compute_config() should do the selection for us. This
+ * function should be folded into compute_config() eventually.
+ */
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ int clock = intel_crtc->config.port_clock;
+
+ intel_put_shared_dpll(intel_crtc);
+
+ return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+}
+
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1183,31 +1225,52 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
-int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+ uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ return 800000;
+ else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450000;
+ else if (freq == LCPLL_CLK_FREQ_450)
+ return 450000;
+ else if (freq == LCPLL_CLK_FREQ_54O_BDW)
+ return 540000;
+ else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+ return 337500;
+ else
+ return 675000;
+}
+
+static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
- if (lcpll & LCPLL_CD_SOURCE_FCLK) {
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
- } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) {
+ else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
- } else if (freq == LCPLL_CLK_FREQ_450) {
+ else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- } else if (IS_HASWELL(dev)) {
- if (IS_ULT(dev))
- return 337500;
- else
- return 540000;
- } else {
- if (freq == LCPLL_CLK_FREQ_54O_BDW)
- return 540000;
- else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
- return 337500;
- else
- return 675000;
- }
+ else if (IS_ULT(dev))
+ return 337500;
+ else
+ return 540000;
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (IS_BROADWELL(dev))
+ return bdw_get_cdclk_freq(dev_priv);
+
+ /* Haswell */
+ return hsw_get_cdclk_freq(dev_priv);
}
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1248,10 +1311,8 @@ static const char * const hsw_ddi_pll_names[] = {
"WRPLL 2",
};
-void intel_ddi_pll_init(struct drm_device *dev)
+static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val = I915_READ(LCPLL_CTL);
int i;
dev_priv->num_shared_dpll = 2;
@@ -1264,6 +1325,14 @@ void intel_ddi_pll_init(struct drm_device *dev)
dev_priv->shared_dplls[i].get_hw_state =
hsw_ddi_pll_get_hw_state;
}
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ hsw_shared_dplls_init(dev_priv);
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
@@ -1444,7 +1513,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
- intel_ddi_clock_get(encoder, pipe_config);
+ hsw_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index de40a44e0ca0..0b327ebb2d9e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -91,15 +91,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
-static void intel_dp_set_m_n(struct intel_crtc *crtc);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n);
+ struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
+static void chv_prepare_pll(struct intel_crtc *crtc);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -1519,34 +1520,6 @@ static void intel_init_dpio(struct drm_device *dev)
}
}
-static void intel_reset_dpio(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_CHERRYVIEW(dev)) {
- enum dpio_phy phy;
- u32 val;
-
- for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
- /* Poll for phypwrgood signal */
- if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
- PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
-
- /*
- * Deassert common lane reset for PHY.
- *
- * This should only be done on init and resume from S3
- * with both PLLs disabled, or we risk losing DPIO and
- * PLL synchronization.
- */
- val = I915_READ(DISPLAY_PHY_CONTROL);
- I915_WRITE(DISPLAY_PHY_CONTROL,
- PHY_COM_LANE_RESET_DEASSERT(phy, val));
- }
- }
-}
-
static void vlv_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -1718,7 +1691,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
assert_pipe_disabled(dev_priv, pipe);
/* Set PLL en = 0 */
- val = DPLL_SSC_REF_CLOCK_CHV;
+ val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
I915_WRITE(DPLL(pipe), val);
@@ -1812,7 +1785,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll->refcount == 0))
return;
- DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
+ DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
pll->name, pll->active, pll->on,
crtc->base.base.id);
@@ -1830,7 +1803,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
pll->on = true;
}
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2115,35 +2088,28 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
/**
* intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @dev_priv: i915 private structure
- * @plane: plane to enable
- * @pipe: pipe being fed
+ * @plane: plane to be enabled
+ * @crtc: crtc for the plane
*
- * Enable @plane on @pipe, making sure that @pipe is running first.
+ * Enable @plane on @crtc, making sure that the pipe is running first.
*/
-static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_enable_primary_hw_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc)
{
- struct drm_device *dev = dev_priv->dev;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- int reg;
- u32 val;
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* If the pipe isn't enabled, we can't pump pixels and may hang */
- assert_pipe_enabled(dev_priv, pipe);
+ assert_pipe_enabled(dev_priv, intel_crtc->pipe);
if (intel_crtc->primary_enabled)
return;
intel_crtc->primary_enabled = true;
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
- WARN_ON(val & DISPLAY_PLANE_ENABLE);
-
- I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
- intel_flush_primary_plane(dev_priv, plane);
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
/*
* BDW signals flip done immediately if the plane
@@ -2156,31 +2122,27 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
/**
* intel_disable_primary_hw_plane - disable the primary hardware plane
- * @dev_priv: i915 private structure
- * @plane: plane to disable
- * @pipe: pipe consuming the data
+ * @plane: plane to be disabled
+ * @crtc: crtc for the plane
*
- * Disable @plane; should be an independent operation.
+ * Disable @plane on @crtc, making sure that the pipe is running first.
*/
-static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_disable_primary_hw_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc)
{
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- int reg;
- u32 val;
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ assert_pipe_enabled(dev_priv, intel_crtc->pipe);
if (!intel_crtc->primary_enabled)
return;
intel_crtc->primary_enabled = false;
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
- WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
-
- I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
- intel_flush_primary_plane(dev_priv, plane);
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
}
static bool need_vtd_wa(struct drm_device *dev)
@@ -2421,12 +2383,35 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
- u32 reg;
+ u32 reg = DSPCNTR(plane);
+
+ if (!intel_crtc->primary_enabled) {
+ I915_WRITE(reg, 0);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(DSPSURF(plane), 0);
+ else
+ I915_WRITE(DSPADDR(plane), 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (intel_crtc->pipe == PIPE_B)
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+ }
- reg = DSPCNTR(plane);
- dspcntr = I915_READ(reg);
- /* Mask out pixel format bits in case we change it */
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
@@ -2458,12 +2443,9 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
BUG();
}
- if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- }
+ if (INTEL_INFO(dev)->gen >= 4 &&
+ obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -2507,12 +2489,22 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
- u32 reg;
+ u32 reg = DSPCNTR(plane);
+
+ if (!intel_crtc->primary_enabled) {
+ I915_WRITE(reg, 0);
+ I915_WRITE(DSPSURF(plane), 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- reg = DSPCNTR(plane);
- dspcntr = I915_READ(reg);
- /* Mask out pixel format bits in case we change it */
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
@@ -2542,12 +2534,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
- else
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
I915_WRITE(reg, dspcntr);
@@ -3906,16 +3894,14 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
assert_vblank_disabled(crtc);
drm_vblank_on(dev, pipe);
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
+ intel_enable_primary_hw_plane(crtc->primary, crtc);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3952,7 +3938,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+ intel_disable_primary_hw_plane(crtc->primary, crtc);
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
@@ -3973,7 +3959,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
@@ -3990,18 +3975,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
+ &intel_crtc->config.fdi_m_n, NULL);
}
ironlake_set_pipeconf(crtc);
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4086,7 +4064,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
@@ -4103,20 +4080,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
+ &intel_crtc->config.fdi_m_n, NULL);
}
haswell_set_pipeconf(crtc);
intel_set_pipe_csc(crtc);
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4539,12 +4509,57 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
vlv_update_cdclk(dev);
}
+static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, cmd;
+
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+
+ switch (cdclk) {
+ case 400000:
+ cmd = 3;
+ break;
+ case 333333:
+ case 320000:
+ cmd = 2;
+ break;
+ case 266667:
+ cmd = 1;
+ break;
+ case 200000:
+ cmd = 0;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val &= ~DSPFREQGUAR_MASK_CHV;
+ val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 50)) {
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ vlv_update_cdclk(dev);
+}
+
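
cherryview_set_cdclk() uses the standard Punit handshake: write the requested frequency code into the DSPFREQGUAR field, then poll until DSPFREQSTAT echoes the same code back, with a 50 ms timeout via wait_for(). A runnable userspace model of the handshake; the Punit here is faked to ack immediately:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUAR_SHIFT	8	/* DSPFREQGUAR_SHIFT_CHV */
#define GUAR_MASK	(0x1fu << GUAR_SHIFT)
#define STAT_SHIFT	24	/* DSPFREQSTAT_SHIFT_CHV */
#define STAT_MASK	(0x1fu << STAT_SHIFT)

static uint32_t fake_reg;

static uint32_t punit_read(void) { return fake_reg; }

/* fake Punit: echo the requested code straight into the status field */
static void punit_write(uint32_t val)
{
	uint32_t cmd = (val & GUAR_MASK) >> GUAR_SHIFT;

	fake_reg = (val & ~STAT_MASK) | (cmd << STAT_SHIFT);
}

static bool set_cdclk_cmd(uint32_t cmd, int max_polls)
{
	uint32_t val = punit_read();

	val &= ~GUAR_MASK;
	val |= cmd << GUAR_SHIFT;
	punit_write(val);

	while (max_polls--)	/* stands in for wait_for(..., 50) */
		if ((punit_read() & STAT_MASK) == (cmd << STAT_SHIFT))
			return true;
	return false;
}

int main(void)
{
	printf("%s\n", set_cdclk_cmd(3, 10) ? "ack" : "timeout");
	return 0;
}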
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int vco = valleyview_get_vco(dev_priv);
int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+ /* FIXME: Punit isn't quite ready yet */
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ return 400000;
+
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
@@ -4607,21 +4622,23 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
- if (req_cdclk != dev_priv->vlv_cdclk_freq)
- valleyview_set_cdclk(dev, req_cdclk);
+ if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+ if (IS_CHERRYVIEW(dev))
+ cherryview_set_cdclk(dev, req_cdclk);
+ else
+ valleyview_set_cdclk(dev, req_cdclk);
+ }
+
modeset_update_crtc_power_domains(dev);
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
bool is_dsi;
- u32 dspcntr;
WARN_ON(!crtc->enabled);
@@ -4630,33 +4647,20 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
- if (!is_dsi && !IS_CHERRYVIEW(dev))
- vlv_prepare_pll(intel_crtc);
-
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
+ if (!is_dsi) {
+ if (IS_CHERRYVIEW(dev))
+ chv_prepare_pll(intel_crtc);
+ else
+ vlv_prepare_pll(intel_crtc);
+ }
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
- I915_WRITE(DSPPOS(plane), 0);
-
i9xx_set_pipeconf(intel_crtc);
- I915_WRITE(DSPCNTR(plane), dspcntr);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4704,12 +4708,9 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- u32 dspcntr;
WARN_ON(!crtc->enabled);
@@ -4718,35 +4719,13 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
- if (pipe == 0)
- dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
- else
- dspcntr |= DISPPLANE_SEL_PIPE_B;
-
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
- I915_WRITE(DSPPOS(plane), 0);
-
i9xx_set_pipeconf(intel_crtc);
- I915_WRITE(DSPCNTR(plane), dspcntr);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
if (!IS_GEN2(dev))
@@ -5275,6 +5254,10 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
u32 val;
int divider;
+ /* FIXME: Punit isn't quite ready yet */
+ if (IS_CHERRYVIEW(dev))
+ return 400000;
+
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
@@ -5519,7 +5502,8 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
}
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n)
+ struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5531,6 +5515,18 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+ /* M2_N2 registers are only available on gen < 8; program them
+ * only when DRRS is supported, so the registers are not
+ * touched unnecessarily.
+ */
+ if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ crtc->config.has_drrs) {
+ I915_WRITE(PIPE_DATA_M2(transcoder),
+ TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+ I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
+ I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
+ I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+ }
} else {
I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
@@ -5539,12 +5535,13 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
}
}
-static void intel_dp_set_m_n(struct intel_crtc *crtc)
+void intel_dp_set_m_n(struct intel_crtc *crtc)
{
if (crtc->config.has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
+ &crtc->config.dp_m2_n2);
}
static void vlv_update_pll(struct intel_crtc *crtc)
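With DRRS a pipe carries two sets of DP link M/N values: dp_m_n for the normal refresh rate and dp_m2_n2 for the downclocked mode. On gen < 8 each set has its own registers (M1_N1 and M2_N2); BDW+ has a single set that is switched at runtime, which is why intel_dp_set_m_n() is exported here. As a rough sketch of the math behind intel_link_compute_m_n(), which is not shown in this diff:

    /*
     * data M/N compares payload bandwidth against link capacity,
     * link M/N compares pixel clock against link clock:
     *
     *   data_m / data_n ~= (pixel_clock * bpp) / (link_clock * lanes * 8)
     *   link_m / link_n ~=  pixel_clock / link_clock
     *
     * dp_m2_n2 is the same computation run with the panel's
     * downclock_mode pixel clock.
     */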
@@ -5662,6 +5659,18 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
static void chv_update_pll(struct intel_crtc *crtc)
{
+ crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+ DPLL_VCO_ENABLE;
+ if (crtc->pipe != PIPE_A)
+ crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ crtc->config.dpll_hw_state.dpll_md =
+ (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_prepare_pll(struct intel_crtc *crtc)
+{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
@@ -5671,15 +5680,6 @@ static void chv_update_pll(struct intel_crtc *crtc)
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
- crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
- DPLL_VCO_ENABLE;
- if (pipe != PIPE_A)
- crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- crtc->config.dpll_hw_state.dpll_md =
- (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-
bestn = crtc->config.dpll.n;
bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
bestm1 = crtc->config.dpll.m1;
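These two hunks split the CHV PLL code the same way the VLV code above is already split: chv_update_pll() now only fills in software PLL state at compute time, while the new chv_prepare_pll() performs the DPIO register writes during enabling. Rough call ordering, assuming the same hook points as the vlv_update_pll()/vlv_prepare_pll() pair:

    chv_update_pll(crtc);	/* fills crtc->config.dpll_hw_state; no MMIO */
    /* ... mode set continues ... */
    chv_prepare_pll(crtc);	/* programs the PLL over DPIO from that state */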
@@ -6171,6 +6171,10 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
u32 mdiv;
int refclk = 100000;
+ /* In case of MIPI, the DPLL will not even be used */
+ if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+ return;
+
mutex_lock(&dev_priv->dpio_lock);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
@@ -6231,7 +6235,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+ crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
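The mask change relaxes the stride readout from 128-byte to 64-byte alignment: 0xffffff80 clears the low seven bits of the DSPSTRIDE value, while 0xffffffc0 clears only the low six. The same fix is applied to ironlake_get_plane_config() below. Effect on a hypothetical register value:

    /* 0x1e46 & 0xffffff80 == 0x1e00   old: stride rounded down to a 128B multiple */
    /* 0x1e46 & 0xffffffc0 == 0x1e40   new: 64B-aligned strides survive readout    */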
@@ -6363,7 +6367,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
u32 val, final;
bool has_lvds = false;
@@ -6373,8 +6376,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
bool can_ssc = false;
/* We need to take the global config into account */
- list_for_each_entry(encoder, &mode_config->encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
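Several hunks in this patch convert open-coded walks of dev->mode_config.encoder_list to for_each_intel_encoder(). The macro's definition is not part of this diff; a minimal sketch consistent with the loops it replaces:

    /* assumed definition, matching the list walk it replaces */
    #define for_each_intel_encoder(dev, intel_encoder)		\
    	list_for_each_entry(intel_encoder,			\
    			    &(dev)->mode_config.encoder_list,	\
    			    base.head)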
@@ -6681,11 +6683,10 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
static void lpt_init_pch_refclk(struct drm_device *dev)
{
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
@@ -7141,7 +7142,8 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
enum transcoder transcoder,
- struct intel_link_m_n *m_n)
+ struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7155,6 +7157,20 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
+ * gen < 8) and if DRRS is supported (to make sure the
+ * registers are not unnecessarily read).
+ */
+ if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ crtc->config.has_drrs) {
+ m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
+ m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
+ m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
+ & ~TU_SIZE_MASK;
+ m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
+ m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ }
} else {
m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
@@ -7173,14 +7189,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
- &pipe_config->dp_m_n);
+ &pipe_config->dp_m_n,
+ &pipe_config->dp_m2_n2);
}
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
- &pipe_config->fdi_m_n);
+ &pipe_config->fdi_m_n, NULL);
}
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -7251,7 +7268,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+ crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
@@ -7611,6 +7628,22 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
return 0;
}
+static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+ switch (pipe_config->ddi_pll_sel) {
+ case PORT_CLK_SEL_WRPLL1:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ break;
+ }
+}
+
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7624,16 +7657,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
- switch (pipe_config->ddi_pll_sel) {
- case PORT_CLK_SEL_WRPLL1:
- pipe_config->shared_dpll = DPLL_ID_WRPLL1;
- break;
- case PORT_CLK_SEL_WRPLL2:
- pipe_config->shared_dpll = DPLL_ID_WRPLL2;
- break;
- }
+ haswell_get_ddi_pll(dev_priv, port, pipe_config);
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -8033,74 +8057,62 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t cntl;
+ uint32_t cntl = 0, size = 0;
- if (base != intel_crtc->cursor_base) {
- /* On these chipsets we can only modify the base whilst
- * the cursor is disabled.
- */
- if (intel_crtc->cursor_cntl) {
- I915_WRITE(_CURACNTR, 0);
- POSTING_READ(_CURACNTR);
- intel_crtc->cursor_cntl = 0;
+ if (base) {
+ unsigned int width = intel_crtc->cursor_width;
+ unsigned int height = intel_crtc->cursor_height;
+ unsigned int stride = roundup_pow_of_two(width) * 4;
+
+ switch (stride) {
+ default:
+ WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+ width, stride);
+ stride = 256;
+ /* fallthrough */
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
}
- I915_WRITE(_CURABASE, base);
- POSTING_READ(_CURABASE);
+ cntl |= CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(stride);
+
+ size = (height << 12) | width;
}
- /* XXX width must be 64, stride 256 => 0x00 << 28 */
- cntl = 0;
- if (base)
- cntl = (CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB);
- if (intel_crtc->cursor_cntl != cntl) {
- I915_WRITE(_CURACNTR, cntl);
+ if (intel_crtc->cursor_cntl != 0 &&
+ (intel_crtc->cursor_base != base ||
+ intel_crtc->cursor_size != size ||
+ intel_crtc->cursor_cntl != cntl)) {
+ /* On these chipsets we can only modify the base/size/stride
+ * whilst the cursor is disabled.
+ */
+ I915_WRITE(_CURACNTR, 0);
POSTING_READ(_CURACNTR);
- intel_crtc->cursor_cntl = cntl;
+ intel_crtc->cursor_cntl = 0;
}
-}
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- uint32_t cntl;
+ if (intel_crtc->cursor_base != base)
+ I915_WRITE(_CURABASE, base);
- cntl = 0;
- if (base) {
- cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->cursor_width) {
- case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
- break;
- default:
- WARN_ON(1);
- return;
- }
- cntl |= pipe << 28; /* Connect to correct pipe */
+ if (intel_crtc->cursor_size != size) {
+ I915_WRITE(CURSIZE, size);
+ intel_crtc->cursor_size = size;
}
+
if (intel_crtc->cursor_cntl != cntl) {
- I915_WRITE(CURCNTR(pipe), cntl);
- POSTING_READ(CURCNTR(pipe));
+ I915_WRITE(_CURACNTR, cntl);
+ POSTING_READ(_CURACNTR);
intel_crtc->cursor_cntl = cntl;
}
-
- /* and commit changes on next vblank */
- I915_WRITE(CURBASE(pipe), base);
- POSTING_READ(CURBASE(pipe));
}
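On 845g/865g the cursor dimensions live in the dedicated CURSIZE register, with height in bits 12 and up and width in the low bits, and base/size/stride may only be changed while the cursor is disabled, hence the disable-then-reprogram sequence above. The packing, for a hypothetical 64x32 cursor:

    size = (height << 12) | width;	/* (32 << 12) | 64 == 0x20040 */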
-static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8125,6 +8137,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
WARN_ON(1);
return;
}
+ cntl |= pipe << 28; /* Connect to correct pipe */
}
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
@@ -8184,15 +8197,50 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
- ivb_update_cursor(crtc, base);
- else if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
intel_crtc->cursor_base = base;
}
+static bool cursor_size_ok(struct drm_device *dev,
+ uint32_t width, uint32_t height)
+{
+ if (width == 0 || height == 0)
+ return false;
+
+ /*
+ * 845g/865g are special in that they are only limited by
+ * the width of their cursors, the height is arbitrary up to
+ * the precision of the register. Everything else requires
+ * square cursors, limited to a few power-of-two sizes.
+ */
+ if (IS_845G(dev) || IS_I865G(dev)) {
+ if ((width & 63) != 0)
+ return false;
+
+ if (width > (IS_845G(dev) ? 64 : 512))
+ return false;
+
+ if (height > 1023)
+ return false;
+ } else {
+ switch (width | height) {
+ case 256:
+ case 128:
+ if (IS_GEN2(dev))
+ return false;
+ case 64:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
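The switch (width | height) test relies on these parts requiring square, power-of-two cursors: for a valid cursor the OR equals the common dimension, while any mismatched pair from {64, 128, 256} ORs to a value that lands in the default case. The fallthrough from case 256/128 into case 64 is intentional. For instance:

    /*  64 |  64 ==  64   -> case 64, accepted           */
    /* 128 | 128 == 128   -> case 128, rejected on gen2  */
    /*  64 | 128 == 192   -> default, rejected           */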
/*
* intel_crtc_cursor_set_obj - Set cursor to specified GEM object
*
@@ -8205,10 +8253,9 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- unsigned old_width;
+ unsigned old_width, stride;
uint32_t addr;
int ret;
@@ -8222,14 +8269,13 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
}
/* Check for which cursor types we support */
- if (!((width == 64 && height == 64) ||
- (width == 128 && height == 128 && !IS_GEN2(dev)) ||
- (width == 256 && height == 256 && !IS_GEN2(dev)))) {
+ if (!cursor_size_ok(dev, width, height)) {
DRM_DEBUG("Cursor dimension not supported\n");
return -EINVAL;
}
- if (obj->base.size < width * height * 4) {
+ stride = roundup_pow_of_two(width) * 4;
+ if (obj->base.size < stride * height) {
DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
@@ -8278,9 +8324,6 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
addr = obj->phys_handle->busaddr;
}
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, (height << 12) | width);
-
finish:
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -8468,8 +8511,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
connector->base.id, connector->name,
encoder->base.id, encoder->name);
- drm_modeset_acquire_init(ctx, 0);
-
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
@@ -8508,10 +8549,14 @@ retry:
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (!possible_crtc->enabled) {
- crtc = possible_crtc;
- break;
- }
+ if (possible_crtc->enabled)
+ continue;
+ /* This can occur when applying the pipe A quirk on resume. */
+ if (to_intel_crtc(possible_crtc)->new_enabled)
+ continue;
+
+ crtc = possible_crtc;
+ break;
}
/*
@@ -8580,15 +8625,11 @@ fail_unlock:
goto retry;
}
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
-
return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx)
+ struct intel_load_detect_pipe *old)
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
@@ -8612,17 +8653,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
drm_framebuffer_unreference(old->release_fb);
}
- goto unlock;
return;
}
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
-
-unlock:
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -9522,6 +9558,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
return false;
else if (i915.use_mmio_flip > 0)
return true;
+ else if (i915.enable_execlists)
+ return true;
else
return ring != obj->ring;
}
@@ -9837,8 +9875,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
to_intel_encoder(connector->base.encoder);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
}
@@ -9869,8 +9906,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
connector->base.encoder = &connector->new_encoder->base;
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->base.crtc = &encoder->new_crtc->base;
}
@@ -9997,6 +10033,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
+
+ DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
+ pipe_config->has_dp_encoder,
+ pipe_config->dp_m2_n2.gmch_m,
+ pipe_config->dp_m2_n2.gmch_n,
+ pipe_config->dp_m2_n2.link_m,
+ pipe_config->dp_m2_n2.link_n,
+ pipe_config->dp_m2_n2.tu);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10031,8 +10076,7 @@ static bool check_single_encoder_cloning(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *source_encoder;
- list_for_each_entry(source_encoder,
- &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, source_encoder) {
if (source_encoder->new_crtc != crtc)
continue;
@@ -10048,8 +10092,7 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
continue;
@@ -10133,8 +10176,7 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (&encoder->new_crtc->base != crtc)
continue;
@@ -10212,8 +10254,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
1 << connector->new_encoder->new_crtc->pipe;
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->base.crtc == &encoder->new_crtc->base)
continue;
@@ -10287,8 +10328,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
- list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, intel_encoder) {
if (!intel_encoder->base.crtc)
continue;
@@ -10377,6 +10417,22 @@ intel_pipe_config_compare(struct drm_device *dev,
return false; \
}
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between high and low RR.
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
+#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
+ if ((current_config->name != pipe_config->name) && \
+ (current_config->alt_name != pipe_config->name)) { \
+ DRM_ERROR("mismatch in " #name " " \
+ "(expected %i or %i, found %i)\n", \
+ current_config->name, \
+ current_config->alt_name, \
+ pipe_config->name); \
+ return false; \
+ }
+
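Expanded, the ALT variant accepts the hardware value if it matches either software copy. For example, PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu) becomes roughly:

    if ((current_config->dp_m_n.tu != pipe_config->dp_m_n.tu) &&
        (current_config->dp_m2_n2.tu != pipe_config->dp_m_n.tu)) {
    	DRM_ERROR("mismatch in dp_m_n.tu (expected %i or %i, found %i)\n",
    		  current_config->dp_m_n.tu,
    		  current_config->dp_m2_n2.tu,
    		  pipe_config->dp_m_n.tu);
    	return false;
    }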
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
DRM_ERROR("mismatch in " #name "(" #mask ") " \
@@ -10409,11 +10465,28 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(fdi_m_n.tu);
PIPE_CONF_CHECK_I(has_dp_encoder);
- PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
- PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
- PIPE_CONF_CHECK_I(dp_m_n.link_m);
- PIPE_CONF_CHECK_I(dp_m_n.link_n);
- PIPE_CONF_CHECK_I(dp_m_n.tu);
+
+ if (INTEL_INFO(dev)->gen < 8) {
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+ PIPE_CONF_CHECK_I(dp_m_n.link_m);
+ PIPE_CONF_CHECK_I(dp_m_n.link_n);
+ PIPE_CONF_CHECK_I(dp_m_n.tu);
+
+ if (current_config->has_drrs) {
+ PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
+ PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
+ PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
+ PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
+ PIPE_CONF_CHECK_I(dp_m2_n2.tu);
+ }
+ } else {
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
+ }
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
@@ -10499,6 +10572,7 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
@@ -10528,8 +10602,7 @@ check_encoder_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
bool enabled = false;
bool active = false;
enum pipe pipe, tracked_pipe;
@@ -10608,8 +10681,7 @@ check_crtc_state(struct drm_device *dev)
WARN(crtc->active && !crtc->base.enabled,
"active crtc, but not enabled in sw tracking\n");
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->base.crtc != &crtc->base)
continue;
enabled = true;
@@ -10631,8 +10703,7 @@ check_crtc_state(struct drm_device *dev)
if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
active = crtc->active;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
@@ -11000,7 +11071,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
}
@@ -11159,8 +11230,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
/* Check for any encoders that needs to be disabled. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
list_for_each_entry(connector,
&dev->mode_config.connector_list,
@@ -11193,9 +11263,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc) {
crtc->new_enabled = true;
break;
@@ -11232,7 +11300,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc)
connector->new_encoder = NULL;
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc)
encoder->new_crtc = NULL;
}
@@ -11295,7 +11363,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
intel_crtc_wait_for_pending_flips(set->crtc);
@@ -11309,8 +11376,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
*/
if (!intel_crtc->primary_enabled && ret == 0) {
WARN_ON(!intel_crtc->active);
- intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
- intel_crtc->pipe);
+ intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
}
/*
@@ -11463,8 +11529,6 @@ static int
intel_primary_plane_disable(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
if (!plane->fb)
@@ -11487,8 +11551,8 @@ intel_primary_plane_disable(struct drm_plane *plane)
goto disable_unpin;
intel_crtc_wait_for_pending_flips(plane->crtc);
- intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
- intel_plane->pipe);
+ intel_disable_primary_hw_plane(plane, plane->crtc);
+
disable_unpin:
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
@@ -11508,9 +11572,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_rect dest = {
@@ -11597,9 +11659,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
if (intel_crtc->primary_enabled)
- intel_disable_primary_hw_plane(dev_priv,
- intel_plane->plane,
- intel_plane->pipe);
+ intel_disable_primary_hw_plane(plane, crtc);
if (plane->fb != fb)
@@ -11616,8 +11676,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
if (!intel_crtc->primary_enabled)
- intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
- intel_crtc->pipe);
+ intel_enable_primary_hw_plane(plane, crtc);
return 0;
}
@@ -11706,8 +11765,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
};
const struct drm_rect clip = {
/* integer pixels */
- .x2 = intel_crtc->config.pipe_src_w,
- .y2 = intel_crtc->config.pipe_src_h,
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
bool visible;
int ret;
@@ -11726,6 +11785,10 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
intel_crtc_update_cursor(crtc, visible);
+
+ intel_frontbuffer_flip(crtc->dev,
+ INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
+
return 0;
}
}
@@ -11802,8 +11865,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
-
- init_waitqueue_head(&intel_crtc->vbl_wait);
+ intel_crtc->cursor_size = ~0;
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
@@ -11866,8 +11928,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
int index_mask = 0;
int entry = 0;
- list_for_each_entry(source_encoder,
- &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
index_mask |= (1 << entry);
@@ -12056,7 +12117,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_edp_psr_init(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(encoder);
@@ -12322,29 +12383,27 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- if (HAS_PCH_SPLIT(dev)) {
- if (IS_GEN5(dev)) {
- dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- } else if (IS_GEN6(dev)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- snb_modeset_global_resources;
- } else if (IS_IVYBRIDGE(dev)) {
- /* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- ivb_modeset_global_resources;
- } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
- dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = haswell_write_eld;
- dev_priv->display.modeset_global_resources =
- haswell_modeset_global_resources;
- }
- } else if (IS_G4X(dev)) {
+ if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
+ } else if (IS_GEN5(dev)) {
+ dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_GEN6(dev)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ snb_modeset_global_resources;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
+ } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+ dev_priv->display.write_eld = haswell_write_eld;
+ dev_priv->display.modeset_global_resources =
+ haswell_modeset_global_resources;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
@@ -12550,8 +12609,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_init_clock_gating(dev);
- intel_reset_dpio(dev);
-
intel_enable_gt_powersave(dev);
}
@@ -12597,7 +12654,10 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
- if (IS_GEN2(dev)) {
+ if (IS_845G(dev) || IS_I865G(dev)) {
+ dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+ dev->mode_config.cursor_height = 1023;
+ } else if (IS_GEN2(dev)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
} else {
@@ -12622,7 +12682,6 @@ void intel_modeset_init(struct drm_device *dev)
}
intel_init_dpio(dev);
- intel_reset_dpio(dev);
intel_shared_dpll_init(dev);
@@ -12665,7 +12724,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
struct intel_connector *connector;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
- struct drm_modeset_acquire_ctx ctx;
+ struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
@@ -12682,10 +12741,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
if (!crt)
return;
- if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
- intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
-
-
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
}
static bool
@@ -12952,8 +13009,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
@@ -13017,8 +13073,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}
/* HW state is read out, now we need to sanitize this mess. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
}
@@ -13117,7 +13172,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
* experience fancy races otherwise.
*/
drm_irq_uninstall(dev);
- cancel_work_sync(&dev_priv->hotplug_work);
+ intel_hpd_cancel_work(dev_priv);
dev_priv->pm._irqs_disabled = true;
/*
@@ -13385,3 +13440,25 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
+
+void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_unpin_work *work;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
+ work = crtc->unpin_work;
+
+ if (work && work->event &&
+ work->event->base.file_priv == file) {
+ kfree(work->event);
+ work->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ }
+}
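Page-flip completion events are accounted against the DRM file that requested them, so events still pending when that file closes must be detached under dev->event_lock rather than delivered later. A sketch of the expected caller, assuming the function is wired into the driver's preclose hook (the hook itself is outside this hunk):

    static void i915_driver_preclose(struct drm_device *dev,
    				 struct drm_file *file)
    {
    	intel_modeset_preclose(dev, file);
    }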
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index eb52ecfe14cf..6a2256cf1f2a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -828,20 +828,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
-static void
-intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder transcoder = crtc->config.cpu_transcoder;
-
- I915_WRITE(PIPE_DATA_M2(transcoder),
- TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
- I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
- I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
-}
-
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@@ -867,6 +853,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->has_dp_encoder = true;
+ pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
@@ -970,13 +957,14 @@ found:
if (intel_connector->panel.downclock_mode != NULL &&
intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+ pipe_config->has_drrs = true;
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2);
}
- if (HAS_DDI(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -1285,6 +1273,19 @@ static void edp_panel_vdd_work(struct work_struct *__work)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+ unsigned long delay;
+
+ /*
+ * Queue the timer to fire a long time from now (relative to the power
+ * down delay) to keep the panel power up across a sequence of
+ * operations.
+ */
+ delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
+}
+
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
@@ -1294,17 +1295,10 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
intel_dp->want_panel_vdd = false;
- if (sync) {
+ if (sync)
edp_panel_vdd_off_sync(intel_dp);
- } else {
- /*
- * Queue the timer to fire a long
- * time from now (relative to the power down delay)
- * to keep the panel power up across a sequence of operations
- */
- schedule_delayed_work(&intel_dp->panel_vdd_work,
- msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
- }
+ else
+ edp_panel_vdd_schedule_off(intel_dp);
}
void intel_edp_panel_on(struct intel_dp *intel_dp)
@@ -1800,7 +1794,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
lockdep_assert_held(&dev_priv->psr.lock);
- lockdep_assert_held(&dev->struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
@@ -2288,6 +2281,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
enum pipe pipe = intel_crtc->pipe;
u32 val;
+ intel_dp_prepare(encoder);
+
mutex_lock(&dev_priv->dpio_lock);
/* program left/right clock distribution */
@@ -2654,8 +2649,8 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
/* Program swing margin */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
- val &= ~DPIO_SWING_MARGIN_MASK;
- val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
+ val &= ~DPIO_SWING_MARGIN000_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
@@ -2966,7 +2961,10 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
}
} else {
- *DP &= ~DP_LINK_TRAIN_MASK;
+ if (IS_CHERRYVIEW(dev))
+ *DP &= ~DP_LINK_TRAIN_MASK_CHV;
+ else
+ *DP &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
@@ -2979,8 +2977,12 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- DRM_ERROR("DP training pattern 3 not supported\n");
- *DP |= DP_LINK_TRAIN_PAT_2;
+ if (IS_CHERRYVIEW(dev)) {
+ *DP |= DP_LINK_TRAIN_PAT_3_CHV;
+ } else {
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ }
break;
}
}
@@ -3267,7 +3269,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
- DP &= ~DP_LINK_TRAIN_MASK;
+ if (IS_CHERRYVIEW(dev))
+ DP &= ~DP_LINK_TRAIN_MASK_CHV;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
}
POSTING_READ(intel_dp->output_reg);
@@ -3548,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (WARN_ON(!intel_encoder->base.crtc))
return;
+ if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+ return;
+
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
return;
@@ -3998,6 +4006,21 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ if (!is_edp(intel_dp))
+ return;
+
+ edp_panel_vdd_off_sync(intel_dp);
+}
+
+static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+ intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+}
+
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
@@ -4013,6 +4036,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .reset = intel_dp_encoder_reset,
.destroy = intel_dp_encoder_destroy,
};
@@ -4026,15 +4050,22 @@ bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ enum intel_display_power_domain power_domain;
+ bool ret = true;
+
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
- DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+ DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
+ port_name(intel_dig_port->port),
long_hpd ? "long" : "short");
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
if (long_hpd) {
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
@@ -4050,8 +4081,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
} else {
if (intel_dp->is_mst) {
- ret = intel_dp_check_mst_status(intel_dp);
- if (ret == -EINVAL)
+ if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
goto mst_fail;
}
@@ -4065,7 +4095,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
- return false;
+ ret = false;
+ goto put_power;
mst_fail:
/* if we were in MST mode, and device is not there get out of MST mode */
if (intel_dp->is_mst) {
@@ -4073,7 +4104,10 @@ mst_fail:
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
- return true;
+put_power:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -4382,7 +4416,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
val |= PIPECONF_EDP_RR_MODE_SWITCH;
- intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
+ intel_dp_set_m_n(intel_crtc);
} else {
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
@@ -4422,7 +4456,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
}
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
- DRM_INFO("VBT doesn't support DRRS\n");
+ DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
return NULL;
}
@@ -4430,7 +4464,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
(dev, fixed_mode, connector);
if (!downclock_mode) {
- DRM_INFO("DRRS not supported\n");
+ DRM_DEBUG_KMS("DRRS not supported\n");
return NULL;
}
@@ -4441,10 +4475,36 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
- DRM_INFO("seamless DRRS supported for eDP panel.\n");
+ DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
+void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp;
+ enum intel_display_power_domain power_domain;
+
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ return;
+
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector,
struct edp_power_seq *power_seq)
@@ -4465,13 +4525,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
- /* The VDD bit needs a power domain reference, so if the bit is already
- * enabled when we boot, grab this reference. */
- if (edp_have_panel_vdd(intel_dp)) {
- enum intel_display_power_domain power_domain;
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
- }
+ intel_edp_panel_vdd_sanitize(intel_encoder);
/* Cache DPCD and EDID for edp. */
intel_edp_panel_vdd_on(intel_dp);
@@ -4691,6 +4745,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->disable = intel_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
if (IS_CHERRYVIEW(dev)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a475a6909c3..d683a2090249 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -153,6 +153,12 @@ struct intel_encoder {
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_config *pipe_config);
+ /*
+ * Called during system suspend after all pending requests for the
+ * encoder are flushed (for example for DP AUX transactions) and
+ * device interrupts are disabled.
+ */
+ void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
};
@@ -324,6 +330,7 @@ struct intel_crtc_config {
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
+ bool has_drrs;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -404,6 +411,7 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
+ uint32_t cursor_size;
uint32_t cursor_base;
struct intel_plane_config plane_config;
@@ -424,8 +432,6 @@ struct intel_crtc {
struct intel_pipe_wm active;
} wm;
- wait_queue_head_t vbl_wait;
-
int scanline_offset;
struct intel_mmio_flip mmio_flip;
};
@@ -449,6 +455,7 @@ struct intel_plane {
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
+ unsigned int rotation;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
@@ -830,8 +837,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
+ struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *pipelined);
@@ -877,6 +883,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
+void intel_dp_set_m_n(struct intel_crtc *crtc);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
@@ -891,7 +898,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
-
+void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -912,6 +919,7 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
@@ -945,7 +953,7 @@ void intel_dvo_init(struct drm_device *dev);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
extern void intel_fbdev_restore_mode(struct drm_device *dev);
#else
@@ -962,7 +970,7 @@ static inline void intel_fbdev_fini(struct drm_device *dev)
{
}
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
}
@@ -1085,7 +1093,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-void intel_plane_restore(struct drm_plane *plane);
+int intel_plane_restore(struct drm_plane *plane);
void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index bfcefbf33709..5bd9e09ad3c5 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -92,6 +92,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ /* DSI uses short packets for sync events, so clear mode flags for DSI */
+ adjusted_mode->flags = 0;
+
if (intel_dsi->dev.dev_ops->mode_fixup)
return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
mode, adjusted_mode);
@@ -152,6 +155,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
if (intel_dsi->dev.dev_ops->enable)
intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ wait_for_dsi_fifo_empty(intel_dsi);
+
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
@@ -177,6 +182,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
tmp |= DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
+ /* update the hw state for DPLL */
+ intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
+ DPLL_REFA_CLK_ENABLE_VLV;
+
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
@@ -192,6 +201,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
if (intel_dsi->dev.dev_ops->send_otp_cmds)
intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
+ wait_for_dsi_fifo_empty(intel_dsi);
+
/* Enable port in pre-enable phase itself because as per hw team
* recommendation, port should be enabled before plane & pipe */
intel_dsi_enable(encoder);
@@ -232,6 +243,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
+ wait_for_dsi_fifo_empty(intel_dsi);
+
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
@@ -246,8 +259,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
temp = I915_READ(MIPI_CTRL(pipe));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(pipe), temp |
- intel_dsi->escape_clk_div <<
- ESCAPE_CLOCK_DIVIDER_SHIFT);
+ intel_dsi->escape_clk_div <<
+ ESCAPE_CLOCK_DIVIDER_SHIFT);
I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
@@ -261,6 +274,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
* some next enable sequence send turn on packet error is observed */
if (intel_dsi->dev.dev_ops->disable)
intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+ wait_for_dsi_fifo_empty(intel_dsi);
}
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -282,7 +297,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
usleep_range(2000, 2500);
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
- == 0x00000), 30))
+ == 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
val = I915_READ(MIPI_PORT_CTRL(pipe));
@@ -351,9 +366,21 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
+ u32 pclk;
DRM_DEBUG_KMS("\n");
- /* XXX: read flags, set to adjusted_mode */
+ /*
+ * DPLL_MD is not used in the DSI case; reading it returns some default
+ * value, so set dpll_md = 0.
+ */
+ pipe_config->dpll_hw_state.dpll_md = 0;
+
+ pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
+ if (!pclk)
+ return;
+
+ pipe_config->adjusted_mode.crtc_clock = pclk;
+ pipe_config->port_clock = pclk;
}
static enum drm_mode_status
@@ -396,9 +423,11 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
/* return pixels in terms of txbyteclkhs */
-static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+ u16 burst_mode_ratio)
{
- return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+ 8 * 100), lane_count);
}
static void set_dsi_timings(struct drm_encoder *encoder,
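txbyteclkhs() converts pixel counts to high-speed byte-clock ticks and now also scales by the burst mode ratio, a percentage where 100 means no stretch. Worked example with hypothetical values (1920 pixels, 24 bpp, 4 lanes):

    /* ratio 100: DIV_ROUND_UP(DIV_ROUND_UP(1920 * 24 * 100, 800), 4) == 1440 */
    /* ratio 120: DIV_ROUND_UP(DIV_ROUND_UP(1920 * 24 * 120, 800), 4) == 1728 */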
@@ -424,10 +453,12 @@ static void set_dsi_timings(struct drm_encoder *encoder,
vbp = mode->vtotal - mode->vsync_end;
/* horizontal values are in terms of high speed byte clock */
- hactive = txbyteclkhs(hactive, bpp, lane_count);
- hfp = txbyteclkhs(hfp, bpp, lane_count);
- hsync = txbyteclkhs(hsync, bpp, lane_count);
- hbp = txbyteclkhs(hbp, bpp, lane_count);
+ hactive = txbyteclkhs(hactive, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
+ hsync = txbyteclkhs(hsync, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
@@ -514,12 +545,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->htotal, bpp,
- intel_dsi->lane_count) + 1);
+ intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
- bpp, intel_dsi->lane_count) + 1);
+ bpp, intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
@@ -549,7 +582,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
* XXX: write MIPI_STOP_STATE_STALL?
*/
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
- intel_dsi->hs_to_lp_count);
+ intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
@@ -574,10 +607,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
* 64 like 1366 x 768. Enable RANDOM resolution support for such
* panels by default */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
- intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format |
- IP_TG_CONFIG |
- RANDOM_DPI_DISPLAY_RESOLUTION);
+ intel_dsi->video_frmt_cfg_bits |
+ intel_dsi->video_mode_format |
+ IP_TG_CONFIG |
+ RANDOM_DPI_DISPLAY_RESOLUTION);
}
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 31db33d3e5cc..657eb5c1b9d8 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -116,6 +116,8 @@ struct intel_dsi {
u16 clk_hs_to_lp_count;
u16 init_count;
+ u32 pclk;
+ u16 burst_mode_ratio;
/* all delays in ms */
u16 backlight_off_delay;
@@ -132,6 +134,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 933c86305237..f4767fd2ebeb 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -419,3 +419,19 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
return 0;
}
+
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+
+ mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+ LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
+ DRM_ERROR("DPI FIFOs are not empty\n");
+}
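The helper gates the DSI enable/disable sequences on all four FIFO-empty bits; wait_for() is i915's polling macro, which retries the condition until the given timeout in milliseconds and returns nonzero on expiry, with the caller logging the error. Minimal usage sketch, with a hypothetical command just queued:

    /* hypothetical command; any queued DSI packet would do */
    dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_SET_DISPLAY_ON);
    wait_for_dsi_fifo_empty(intel_dsi);	/* drain before touching the port */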
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 9a18cbfa5460..46aa1acc00eb 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -51,6 +51,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 47c7584a4aa0..f6bdd44069ce 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -271,6 +271,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
u32 ths_prepare_ns, tclk_trail_ns;
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
+ u32 pclk, computed_ddr;
+ u16 burst_mode_ratio;
DRM_DEBUG_KMS("\n");
@@ -284,8 +286,6 @@ static bool generic_init(struct intel_dsi_device *dsi)
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
bits_per_pixel = 16;
- bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
-
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@@ -297,6 +297,40 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ pclk = mode->clock;
+
+ /* Burst Mode Ratio
+ * Target ddr frequency from VBT divided by the non-burst ddr
+ * frequency, multiplied by 100 to preserve the remainder
+ */
+ if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ if (mipi_config->target_burst_mode_freq) {
+ computed_ddr =
+ (pclk * bits_per_pixel) / intel_dsi->lane_count;
+
+ if (mipi_config->target_burst_mode_freq <
+ computed_ddr) {
+ DRM_ERROR("Burst mode freq is less than computed\n");
+ return false;
+ }
+
+ burst_mode_ratio = DIV_ROUND_UP(
+ mipi_config->target_burst_mode_freq * 100,
+ computed_ddr);
+
+ pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
+ } else {
+ DRM_ERROR("Burst mode target is not set\n");
+ return false;
+ }
+ } else
+ burst_mode_ratio = 100;
+
+ intel_dsi->burst_mode_ratio = burst_mode_ratio;
+ intel_dsi->pclk = pclk;
+
+ bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
+
switch (intel_dsi->escape_clk_div) {
case 0:
tlpx_ns = 50;
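
A minimal standalone sketch of the burst-mode ratio arithmetic introduced above (not part of the patch; the mode clock, lane count and VBT target frequency are made-up example values, and DIV_ROUND_UP mirrors the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t pclk = 148500;               /* mode clock in kHz (example) */
	uint32_t bits_per_pixel = 24;         /* RGB888 */
	uint32_t lane_count = 4;
	uint32_t target_burst_freq = 1000000; /* VBT target DDR freq, kHz (example) */

	/* DDR rate the mode needs without bursting */
	uint32_t computed_ddr = (pclk * bits_per_pixel) / lane_count;

	/* ratio scaled by 100 so two digits of remainder survive */
	uint32_t ratio = DIV_ROUND_UP(target_burst_freq * 100, computed_ddr);

	/* link pixel clock sped up by the same ratio */
	uint32_t burst_pclk = DIV_ROUND_UP(pclk * ratio, 100);

	printf("computed_ddr=%u ratio=%u burst_pclk=%u\n",
	       (unsigned)computed_ddr, (unsigned)ratio, (unsigned)burst_pclk);
	return 0;
}

With these numbers: computed_ddr is 891000 kHz, the ratio rounds up to 113, and the link pixel clock becomes 167805 kHz.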
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index ba79ec19da3b..fa7a6ca34cd6 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -134,8 +134,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
#else
/* Get DSI clock from pixel clock */
-static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
- int pixel_format, int lane_count)
+static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
u32 bpp;
@@ -156,7 +155,7 @@ static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
- dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
+ dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
return dsi_clk_khz;
}
@@ -191,7 +190,7 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
for (m = 62; m <= 92; m++) {
for (p = 2; p <= 6; p++) {
/* Find the optimal m and p divisors
- with minimal error +/- the required clock */
+ with minimal error +/- the required clock */
calc_dsi_clk = (m * ref_clk) / p;
if (calc_dsi_clk == target_dsi_clk) {
calc_m = m;
@@ -228,15 +227,13 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
struct dsi_mnp dsi_mnp;
u32 dsi_clk;
- dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
- intel_dsi->lane_count);
+ dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
+ intel_dsi->lane_count);
ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
if (ret) {
@@ -298,3 +295,84 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+
+static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+{
+ int bpp;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+ WARN(bpp != pipe_bpp,
+ "bpp match assertion failure (expected %d, current %d)\n",
+ bpp, pipe_bpp);
+}
+
+u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 dsi_clock, pclk;
+ u32 pll_ctl, pll_div;
+ u32 m = 0, p = 0;
+ int refclk = 25000;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+ pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* mask out other bits and extract the P1 divisor */
+ pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+ pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+ /* mask out the other bits and extract the M1 divisor */
+ pll_div &= DSI_PLL_M1_DIV_MASK;
+ pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+ while (pll_ctl) {
+ pll_ctl = pll_ctl >> 1;
+ p++;
+ }
+ p--;
+
+ if (!p) {
+ DRM_ERROR("wrong P1 divisor\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+ if (lfsr_converts[i] == pll_div)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(lfsr_converts)) {
+ DRM_ERROR("wrong m_seed programmed\n");
+ return 0;
+ }
+
+ m = i + 62;
+
+ dsi_clock = (m * refclk) / p;
+
+ /* pixel_format and pipe_bpp should agree */
+ assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
+
+ pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
+
+ return pclk;
+}
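
Worked in the other direction, a standalone sketch of what vlv_get_dsi_pclk computes once the PLL divisors have been decoded (not part of the patch; m and p are made up but within the valid 62..92 and 2..6 ranges the driver accepts):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	uint32_t m = 70, p = 3;      /* decoded M1/P1 divisors (examples) */
	uint32_t refclk = 25000;     /* 25 MHz reference, in kHz */
	uint32_t lane_count = 4, pipe_bpp = 24;

	uint32_t dsi_clock = (m * refclk) / p;
	uint32_t pclk = DIV_ROUND_CLOSEST(dsi_clock * lane_count, pipe_bpp);

	printf("dsi_clock=%u kHz pclk=%u kHz\n",
	       (unsigned)dsi_clock, (unsigned)pclk);
	return 0;
}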
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f475414671d8..cf052a39558d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
@@ -636,6 +637,15 @@ out:
return false;
}
+static void intel_fbdev_suspend_worker(struct work_struct *work)
+{
+ intel_fbdev_set_suspend(container_of(work,
+ struct drm_i915_private,
+ fbdev_suspend_work)->dev,
+ FBINFO_STATE_RUNNING,
+ true);
+}
+
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
@@ -662,6 +672,8 @@ int intel_fbdev_init(struct drm_device *dev)
}
dev_priv->fbdev = ifbdev;
+ INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
return 0;
@@ -682,12 +694,14 @@ void intel_fbdev_fini(struct drm_device *dev)
if (!dev_priv->fbdev)
return;
+ flush_work(&dev_priv->fbdev_suspend_work);
+
intel_fbdev_destroy(dev, dev_priv->fbdev);
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
-void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
@@ -698,6 +712,33 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
info = ifbdev->helper.fbdev;
+ if (synchronous) {
+ /* Flush any pending work to turn the console on, and then
+ * wait to turn it off. It must be synchronous as we are
+ * about to suspend or unload the driver.
+ *
+ * Note that from within the work-handler, we cannot flush
+ * ourselves, so only flush outstanding work upon suspend!
+ */
+ if (state != FBINFO_STATE_RUNNING)
+ flush_work(&dev_priv->fbdev_suspend_work);
+ console_lock();
+ } else {
+ /*
+	 * The console lock can be pretty contended on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+ WARN_ON(state != FBINFO_STATE_RUNNING);
+ if (!console_trylock()) {
+ /* Don't block our own workqueue as this can
+ * be run in parallel with other i915.ko tasks.
+ */
+ schedule_work(&dev_priv->fbdev_suspend_work);
+ return;
+ }
+ }
+
/* On resume from hibernation: If the object is shmemfs backed, it has
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
@@ -706,6 +747,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
memset_io(info->screen_base, 0, info->screen_size);
fb_set_suspend(info, state);
+ console_unlock();
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
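
The sync/async split above boils down to a trylock-or-defer pattern; a minimal userspace sketch with pthreads (purely illustrative, not part of the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_suspend(int state)
{
	printf("fb_set_suspend(%d)\n", state); /* stand-in for the real work */
}

int main(void)
{
	/* Resume hot path: never sleep on a contended lock; if it cannot
	 * be taken immediately, hand the whole job to a worker that is
	 * allowed to block (the schedule_work() branch above). */
	if (pthread_mutex_trylock(&console_lock) == 0) {
		set_suspend(0);
		pthread_mutex_unlock(&console_lock);
	} else {
		printf("deferred: worker will retry with a blocking lock\n");
	}
	return 0;
}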
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f9151f6641d9..9169786dbbc3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -885,7 +885,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
if (HAS_GMCH_DISPLAY(dev))
return false;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
continue;
@@ -1260,6 +1260,8 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
enum pipe pipe = intel_crtc->pipe;
u32 val;
+ intel_hdmi_prepare(encoder);
+
mutex_lock(&dev_priv->dpio_lock);
/* program left/right clock distribution */
@@ -1429,8 +1431,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
- val &= ~DPIO_SWING_MARGIN_MASK;
- val |= 102 << DPIO_SWING_MARGIN_SHIFT;
+ val &= ~DPIO_SWING_MARGIN000_MASK;
+ val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
new file mode 100644
index 000000000000..c096b9b7f22a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -0,0 +1,1697 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ * Michel Thierry <michel.thierry@intel.com>
+ * Thomas Daniel <thomas.daniel@intel.com>
+ * Oscar Mateo <oscar.mateo@intel.com>
+ *
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences with the legacy HW contexts is that logical
+ * ring contexts incorporate many more things into the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there means you don't need to do a ppgtt->switch_mm yourself,
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But what about the ringbuffer control registers (head, tail, etc.)?
+ * Shouldn't we just need one set of those per engine command streamer? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder, that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one request is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
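
A standalone sketch of the "first two distinct contexts" rule described in the last two paragraphs (not part of the patch; the queue contents are made up):

#include <stdio.h>

int main(void)
{
	int queue[] = { 7, 7, 7, 3, 3 };   /* context IDs, head of queue first */
	int n = sizeof(queue) / sizeof(queue[0]);
	int first = queue[0], second = -1; /* -1 plays the NULL second context */
	int i;

	/* Requests for the same context as the head are folded into it: only
	 * the newest tail matters, so the earlier ones can be discarded. */
	for (i = 1; i < n; i++) {
		if (queue[i] != first) {
			second = queue[i];
			break;
		}
	}

	printf("submit ELSP pair: ctx %d + ctx %d\n", first, second);
	return 0;
}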
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
+
+#define GEN8_LR_CONTEXT_ALIGN 4096
+
+#define RING_EXECLIST_QFULL (1 << 0x2)
+#define RING_EXECLIST1_VALID (1 << 0x3)
+#define RING_EXECLIST0_VALID (1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
+#define RING_EXECLIST1_ACTIVE (1 << 0x11)
+#define RING_EXECLIST0_ACTIVE (1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
+
+#define CTX_LRI_HEADER_0 0x01
+#define CTX_CONTEXT_CONTROL 0x02
+#define CTX_RING_HEAD 0x04
+#define CTX_RING_TAIL 0x06
+#define CTX_RING_BUFFER_START 0x08
+#define CTX_RING_BUFFER_CONTROL 0x0a
+#define CTX_BB_HEAD_U 0x0c
+#define CTX_BB_HEAD_L 0x0e
+#define CTX_BB_STATE 0x10
+#define CTX_SECOND_BB_HEAD_U 0x12
+#define CTX_SECOND_BB_HEAD_L 0x14
+#define CTX_SECOND_BB_STATE 0x16
+#define CTX_BB_PER_CTX_PTR 0x18
+#define CTX_RCS_INDIRECT_CTX 0x1a
+#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
+#define CTX_LRI_HEADER_1 0x21
+#define CTX_CTX_TIMESTAMP 0x22
+#define CTX_PDP3_UDW 0x24
+#define CTX_PDP3_LDW 0x26
+#define CTX_PDP2_UDW 0x28
+#define CTX_PDP2_LDW 0x2a
+#define CTX_PDP1_UDW 0x2c
+#define CTX_PDP1_LDW 0x2e
+#define CTX_PDP0_UDW 0x30
+#define CTX_PDP0_LDW 0x32
+#define CTX_LRI_HEADER_2 0x41
+#define CTX_R_PWR_CLK_STATE 0x42
+#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
+
+#define GEN8_CTX_VALID (1<<0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
+#define GEN8_CTX_FORCE_RESTORE (1<<2)
+#define GEN8_CTX_L3LLC_COHERENT (1<<5)
+#define GEN8_CTX_PRIVILEGE (1<<8)
+enum {
+ ADVANCED_CONTEXT = 0,
+ LEGACY_CONTEXT,
+ ADVANCED_AD_CONTEXT,
+ LEGACY_64B_CONTEXT
+};
+#define GEN8_CTX_MODE_SHIFT 3
+enum {
+ FAULT_AND_HANG = 0,
+ FAULT_AND_HALT, /* Debug only */
+ FAULT_AND_STREAM,
+ FAULT_AND_CONTINUE /* Unsupported */
+};
+#define GEN8_CTX_ID_SHIFT 32
+
+/**
+ * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
+ * @dev: DRM device.
+ * @enable_execlists: value of i915.enable_execlists module parameter.
+ *
+ * Only certain platforms support Execlists (the prerequisites being
+ * support for Logical Ring Contexts and Aliasing PPGTT or better),
+ * and only when enabled via module parameter.
+ *
+ * Return: 1 if Execlists is supported and should be enabled.
+ */
+int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+{
+ WARN_ON(i915.enable_ppgtt == -1);
+
+ if (enable_execlists == 0)
+ return 0;
+
+ if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+ i915.use_mmio_flip >= 0)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * intel_execlists_ctx_id() - get the Execlists Context ID
+ * @ctx_obj: Logical Ring Context backing object.
+ *
+ * Do not confuse with ctx->id! Unfortunately we have a name overload
+ * here: the old context ID we pass to userspace as a handle so that
+ * it can refer to a context, and the new context ID we pass to the
+ * ELSP so that the GPU can inform us of the context status via
+ * interrupts.
+ *
+ * Return: 20-bit globally unique context ID.
+ */
+u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+{
+ u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+
+ /* LRCA is required to be 4K aligned so the more significant 20 bits
+ * are globally unique */
+ return lrca >> 12;
+}
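
A quick standalone illustration of the shift above (not part of the patch; the LRCA value is made up but 4K aligned, as required):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lrca = 0x0012f000;   /* example GGTT offset, 4K aligned */
	uint32_t ctx_id = lrca >> 12; /* the 20 significant bits */

	printf("ctx_id=0x%05x\n", (unsigned)ctx_id);
	return 0;
}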
+
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
+{
+ uint64_t desc;
+ uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+
+ WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
+
+ desc = GEN8_CTX_VALID;
+ desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+ desc |= GEN8_CTX_PRIVILEGE;
+ desc |= lrca;
+ desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
+
+ /* TODO: WaDisableLiteRestore when we start using semaphore
+ * signalling between Command Streamers */
+ /* desc |= GEN8_CTX_FORCE_RESTORE; */
+
+ return desc;
+}
+
+static void execlists_elsp_write(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *ctx_obj0,
+ struct drm_i915_gem_object *ctx_obj1)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ uint64_t temp = 0;
+ uint32_t desc[4];
+ unsigned long flags;
+
+ /* XXX: You must always write both descriptors in the order below. */
+ if (ctx_obj1)
+ temp = execlists_ctx_descriptor(ctx_obj1);
+ else
+ temp = 0;
+ desc[1] = (u32)(temp >> 32);
+ desc[0] = (u32)temp;
+
+ temp = execlists_ctx_descriptor(ctx_obj0);
+ desc[3] = (u32)(temp >> 32);
+ desc[2] = (u32)temp;
+
+ /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
+ * are in progress.
+ *
+ * The other problem is that we can't just call gen6_gt_force_wake_get()
+ * because that function calls intel_runtime_pm_get(), which might sleep.
+ * Instead, we do the runtime_pm_get/put when creating/destroying requests.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ I915_WRITE(RING_ELSP(ring), desc[1]);
+ I915_WRITE(RING_ELSP(ring), desc[0]);
+ I915_WRITE(RING_ELSP(ring), desc[3]);
+ /* The context is automatically loaded after the following */
+ I915_WRITE(RING_ELSP(ring), desc[2]);
+
+	/* ELSP is a write-only register, so use another nearby register for the posting read */
+ POSTING_READ(RING_EXECLIST_STATUS(ring));
+
+ /* Release Force Wakeup (see the big comment above). */
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+}
+
+static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+{
+ struct page *page;
+ uint32_t *reg_state;
+
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ reg_state = kmap_atomic(page);
+
+ reg_state[CTX_RING_TAIL+1] = tail;
+
+ kunmap_atomic(reg_state);
+
+ return 0;
+}
+
+static int execlists_submit_context(struct intel_engine_cs *ring,
+ struct intel_context *to0, u32 tail0,
+ struct intel_context *to1, u32 tail1)
+{
+ struct drm_i915_gem_object *ctx_obj0;
+ struct drm_i915_gem_object *ctx_obj1 = NULL;
+
+ ctx_obj0 = to0->engine[ring->id].state;
+ BUG_ON(!ctx_obj0);
+ WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+
+ execlists_ctx_write_tail(ctx_obj0, tail0);
+
+ if (to1) {
+ ctx_obj1 = to1->engine[ring->id].state;
+ BUG_ON(!ctx_obj1);
+ WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+
+ execlists_ctx_write_tail(ctx_obj1, tail1);
+ }
+
+ execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+
+ return 0;
+}
+
+static void execlists_context_unqueue(struct intel_engine_cs *ring)
+{
+ struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
+ struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ assert_spin_locked(&ring->execlist_lock);
+
+ if (list_empty(&ring->execlist_queue))
+ return;
+
+ /* Try to read in pairs */
+ list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+ execlist_link) {
+ if (!req0) {
+ req0 = cursor;
+ } else if (req0->ctx == cursor->ctx) {
+ /* Same ctx: ignore first request, as second request
+ * will update tail past first request's workload */
+ cursor->elsp_submitted = req0->elsp_submitted;
+ list_del(&req0->execlist_link);
+ queue_work(dev_priv->wq, &req0->work);
+ req0 = cursor;
+ } else {
+ req1 = cursor;
+ break;
+ }
+ }
+
+ WARN_ON(req1 && req1->elsp_submitted);
+
+ WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
+ req1 ? req1->ctx : NULL,
+ req1 ? req1->tail : 0));
+
+ req0->elsp_submitted++;
+ if (req1)
+ req1->elsp_submitted++;
+}
+
+static bool execlists_check_remove_request(struct intel_engine_cs *ring,
+ u32 request_id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_ctx_submit_request *head_req;
+
+ assert_spin_locked(&ring->execlist_lock);
+
+ head_req = list_first_entry_or_null(&ring->execlist_queue,
+ struct intel_ctx_submit_request,
+ execlist_link);
+
+ if (head_req != NULL) {
+ struct drm_i915_gem_object *ctx_obj =
+ head_req->ctx->engine[ring->id].state;
+ if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+ WARN(head_req->elsp_submitted == 0,
+ "Never submitted head request\n");
+
+ if (--head_req->elsp_submitted <= 0) {
+ list_del(&head_req->execlist_link);
+ queue_work(dev_priv->wq, &head_req->work);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
+ * @ring: Engine Command Streamer to handle.
+ *
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 status_pointer;
+ u8 read_pointer;
+ u8 write_pointer;
+ u32 status;
+ u32 status_id;
+ u32 submit_contexts = 0;
+
+ status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+
+ read_pointer = ring->next_context_status_buffer;
+ write_pointer = status_pointer & 0x07;
+ if (read_pointer > write_pointer)
+ write_pointer += 6;
+
+ spin_lock(&ring->execlist_lock);
+
+ while (read_pointer < write_pointer) {
+ read_pointer++;
+ status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+ (read_pointer % 6) * 8);
+ status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+ (read_pointer % 6) * 8 + 4);
+
+ if (status & GEN8_CTX_STATUS_PREEMPTED) {
+ if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
+ if (execlists_check_remove_request(ring, status_id))
+ WARN(1, "Lite Restored request removed from queue\n");
+ } else
+ WARN(1, "Preemption without Lite Restore\n");
+ }
+
+ if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
+ (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
+ if (execlists_check_remove_request(ring, status_id))
+ submit_contexts++;
+ }
+ }
+
+ if (submit_contexts != 0)
+ execlists_context_unqueue(ring);
+
+ spin_unlock(&ring->execlist_lock);
+
+ WARN(submit_contexts > 2, "More than two context complete events?\n");
+ ring->next_context_status_buffer = write_pointer % 6;
+
+ I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
+ ((u32)ring->next_context_status_buffer & 0x07) << 8);
+}
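
A standalone sketch of the six-entry status-buffer pointer arithmetic used above (not part of the patch; the pointer values are made up to show the wrapped case):

#include <stdio.h>

int main(void)
{
	unsigned read_pointer = 4;  /* last entry consumed (example) */
	unsigned write_pointer = 1; /* HW pointer, already wrapped (example) */
	unsigned i;

	/* Unwrap so a simple linear walk covers the wrapped range. */
	if (read_pointer > write_pointer)
		write_pointer += 6;

	for (i = read_pointer + 1; i <= write_pointer; i++)
		printf("consume CSB entry %u\n", i % 6);

	printf("next_context_status_buffer=%u\n", write_pointer % 6);
	return 0;
}

This consumes entries 5, 0 and 1, and leaves the next read pointer at 1, matching the modulo-6 bookkeeping in the function.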
+
+static void execlists_free_request_task(struct work_struct *work)
+{
+ struct intel_ctx_submit_request *req =
+ container_of(work, struct intel_ctx_submit_request, work);
+ struct drm_device *dev = req->ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_runtime_pm_put(dev_priv);
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_context_unreference(req->ctx);
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(req);
+}
+
+static int execlists_context_queue(struct intel_engine_cs *ring,
+ struct intel_context *to,
+ u32 tail)
+{
+ struct intel_ctx_submit_request *req = NULL, *cursor;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ unsigned long flags;
+ int num_elements = 0;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (req == NULL)
+ return -ENOMEM;
+ req->ctx = to;
+ i915_gem_context_reference(req->ctx);
+ req->ring = ring;
+ req->tail = tail;
+ INIT_WORK(&req->work, execlists_free_request_task);
+
+ intel_runtime_pm_get(dev_priv);
+
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+
+ list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+ if (++num_elements > 2)
+ break;
+
+ if (num_elements > 2) {
+ struct intel_ctx_submit_request *tail_req;
+
+ tail_req = list_last_entry(&ring->execlist_queue,
+ struct intel_ctx_submit_request,
+ execlist_link);
+
+ if (to == tail_req->ctx) {
+ WARN(tail_req->elsp_submitted != 0,
+ "More than 2 already-submitted reqs queued\n");
+ list_del(&tail_req->execlist_link);
+ queue_work(dev_priv->wq, &tail_req->work);
+ }
+ }
+
+ list_add_tail(&req->execlist_link, &ring->execlist_queue);
+ if (num_elements == 0)
+ execlists_context_unqueue(ring);
+
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ return 0;
+}
+
+static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
+ struct list_head *vmas)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct i915_vma *vma;
+ uint32_t flush_domains = 0;
+ bool flush_chipset = false;
+ int ret;
+
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ ret = i915_gem_object_sync(obj, ring);
+ if (ret)
+ return ret;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ flush_chipset |= i915_gem_clflush_object(obj, false);
+
+ flush_domains |= obj->base.write_domain;
+ }
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
+ /* Unconditionally invalidate gpu caches and ensure that we do flush
+ * any residual writes from the previous batch.
+ */
+ return logical_ring_invalidate_all_caches(ringbuf);
+}
+
+/**
+ * execlists_submission() - submit a batchbuffer for execution, Execlists style
+ * @dev: DRM device.
+ * @file: DRM file.
+ * @ring: Engine Command Streamer to submit to.
+ * @ctx: Context to employ for this submission.
+ * @args: execbuffer call arguments.
+ * @vmas: list of vmas.
+ * @batch_obj: the batchbuffer to submit.
+ * @exec_start: batchbuffer start virtual address pointer.
+ * @flags: translated execbuffer call flags.
+ *
+ * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
+ * away the submission details of the execbuffer ioctl call.
+ *
+ * Return: non-zero if the submission fails.
+ */
+int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ int instp_mode;
+ u32 instp_mask;
+ int ret;
+
+ instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ instp_mask = I915_EXEC_CONSTANTS_MASK;
+ switch (instp_mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+ return -EINVAL;
+ }
+
+ if (instp_mode != dev_priv->relative_constants_mode) {
+ if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+ return -EINVAL;
+ }
+
+ /* The HW changed the meaning on this bit on gen6 */
+ instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects != 0) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ } else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
+ if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+ DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+ return -EINVAL;
+ }
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ DRM_DEBUG("sol reset is gen7 only\n");
+ return -EINVAL;
+ }
+
+ ret = execlists_move_to_gpu(ringbuf, vmas);
+ if (ret)
+ return ret;
+
+ if (ring == &dev_priv->ring[RCS] &&
+ instp_mode != dev_priv->relative_constants_mode) {
+ ret = intel_logical_ring_begin(ringbuf, 4);
+ if (ret)
+ return ret;
+
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
+ intel_logical_ring_emit(ringbuf, INSTPM);
+ intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+ intel_logical_ring_advance(ringbuf);
+
+ dev_priv->relative_constants_mode = instp_mode;
+ }
+
+ ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+ if (ret)
+ return ret;
+
+ i915_gem_execbuffer_move_to_active(vmas, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+ return 0;
+}
+
+void intel_logical_ring_stop(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ int ret;
+
+ if (!intel_ring_initialized(ring))
+ return;
+
+ ret = intel_ring_idle(ring);
+ if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
+ /* TODO: Is this correct with Execlists enabled? */
+ I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
+ return;
+ }
+ I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+}
+
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+/**
+ * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * @ringbuf: Logical Ringbuffer to advance.
+ *
+ * The tail is updated in our logical ringbuffer struct, not in the actual context. What
+ * really happens during submission is that the context and current tail will be placed
+ * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
+ * point, the tail *inside* the context is updated and the ELSP written to.
+ */
+void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
+
+ intel_logical_ring_advance(ringbuf);
+
+ if (intel_ring_stopped(ring))
+ return;
+
+ execlists_context_queue(ring, ctx, ringbuf->tail);
+}
+
+static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ if (ring->outstanding_lazy_seqno)
+ return 0;
+
+ if (ring->preallocated_lazy_request == NULL) {
+ struct drm_i915_gem_request *request;
+
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ /* Hold a reference to the context this request belongs to
+ * (we will need it when the time comes to emit/retire the
+ * request).
+ */
+ request->ctx = ctx;
+ i915_gem_context_reference(request->ctx);
+
+ ring->preallocated_lazy_request = request;
+ }
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+}
+
+static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
+ int bytes)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+
+ ringbuf->space = intel_ring_space(ringbuf);
+ if (ringbuf->space >= bytes)
+ return 0;
+ }
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (__intel_ring_space(request->tail, ringbuf->tail,
+ ringbuf->size) >= bytes) {
+ seqno = request->seqno;
+ break;
+ }
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(ring);
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+
+ ringbuf->space = intel_ring_space(ringbuf);
+ return 0;
+}
+
+static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+ int bytes)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long end;
+ int ret;
+
+ ret = logical_ring_wait_request(ringbuf, bytes);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /* Force the context submission in case we have been skipping it */
+ intel_logical_ring_advance_and_submit(ringbuf);
+
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
+
+ do {
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->space = intel_ring_space(ringbuf);
+ if (ringbuf->space >= bytes) {
+ ret = 0;
+ break;
+ }
+
+ msleep(1);
+
+ if (dev_priv->mm.interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
+ if (ret)
+ break;
+
+ if (time_after(jiffies, end)) {
+ ret = -EBUSY;
+ break;
+ }
+ } while (1);
+
+ return ret;
+}
+
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
+{
+ uint32_t __iomem *virt;
+ int rem = ringbuf->size - ringbuf->tail;
+
+ if (ringbuf->space < rem) {
+ int ret = logical_ring_wait_for_space(ringbuf, rem);
+
+ if (ret)
+ return ret;
+ }
+
+ virt = ringbuf->virtual_start + ringbuf->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
+
+ ringbuf->tail = 0;
+ ringbuf->space = intel_ring_space(ringbuf);
+
+ return 0;
+}
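
The space checks in the two helpers above rely on the usual circular-buffer formula (intel_ring_space/__intel_ring_space elsewhere in the driver); a standalone sketch, assuming i915's convention of keeping a small reserve so the tail never quite catches the head:

#include <stdio.h>

/* Free space between tail and head on a circular ring, minus a small
 * reserve (64 bytes here) so a full ring stays distinguishable from an
 * empty one. */
static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 64);

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	printf("%d\n", ring_space(4096, 1024, 32768)); /* head ahead of tail */
	printf("%d\n", ring_space(1024, 4096, 32768)); /* wrapped case */
	return 0;
}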
+
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
+{
+ int ret;
+
+ if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
+ ret = logical_ring_wrap_buffer(ringbuf);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ringbuf->space < bytes)) {
+ ret = logical_ring_wait_for_space(ringbuf, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
+ *
+ * @ringbuf: Logical ringbuffer.
+ * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
+ *
+ * The ringbuffer might not be ready to accept the commands right away (it may need
+ * to be wrapped, or to wait a bit for the tail to be updated). This function takes care of that
+ * and also preallocates a request (every workload submission is still mediated through
+ * requests, just as with legacy ringbuffer submission).
+ *
+ * Return: non-zero if the ringbuffer is not ready to be written to.
+ */
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
+ ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
+ if (ret)
+ return ret;
+
+ /* Preallocate the olr before touching the ring */
+ ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
+ if (ret)
+ return ret;
+
+ ringbuf->space -= num_dwords * sizeof(uint32_t);
+ return 0;
+}
+
+static int gen8_init_common_ring(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
+ I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
+ _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ POSTING_READ(RING_MODE_GEN7(ring));
+ DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+
+ memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+
+ return 0;
+}
+
+static int gen8_init_render_ring(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = gen8_init_common_ring(ring);
+ if (ret)
+ return ret;
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ *
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+ */
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ ret = intel_init_pipe_control(ring);
+ if (ret)
+ return ret;
+
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+ return ret;
+}
+
+static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
+ u64 offset, unsigned flags)
+{
+ bool ppgtt = !(flags & I915_DISPATCH_SECURE);
+ int ret;
+
+ ret = intel_logical_ring_begin(ringbuf, 4);
+ if (ret)
+ return ret;
+
+ /* FIXME(BDW): Address space and security selectors. */
+ intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
+ intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
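
A trivial standalone check of the lower/upper 32-bit split fed to MI_BATCH_BUFFER_START_GEN8 above (not part of the patch; the offset is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0x0000000123456000ull; /* example batch address */
	uint32_t lo = (uint32_t)offset;          /* lower_32_bits() */
	uint32_t hi = (uint32_t)(offset >> 32);  /* upper_32_bits() */

	printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}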
+
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
+ POSTING_READ(RING_IMR(ring->mmio_base));
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
+ POSTING_READ(RING_IMR(ring->mmio_base));
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+ u32 invalidate_domains,
+ u32 unused)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_logical_ring_begin(ringbuf, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW + 1;
+
+ if (ring == &dev_priv->ring[VCS]) {
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
+ } else {
+ if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
+ }
+
+ intel_logical_ring_emit(ringbuf, cmd);
+ intel_logical_ring_emit(ringbuf,
+ I915_GEM_HWS_SCRATCH_ADDR |
+ MI_FLUSH_DW_USE_GTT);
+ intel_logical_ring_emit(ringbuf, 0); /* upper addr */
+ intel_logical_ring_emit(ringbuf, 0); /* value */
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
+
+static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 flags = 0;
+ int ret;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ }
+
+ ret = intel_logical_ring_begin(ringbuf, 6);
+ if (ret)
+ return ret;
+
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, flags);
+ intel_logical_ring_emit(ringbuf, scratch_addr);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
+
+static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+ intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
+static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ u32 cmd;
+ int ret;
+
+ ret = intel_logical_ring_begin(ringbuf, 6);
+ if (ret)
+ return ret;
+
+ cmd = MI_STORE_DWORD_IMM_GEN8;
+ cmd |= MI_GLOBAL_GTT;
+
+ intel_logical_ring_emit(ringbuf, cmd);
+ intel_logical_ring_emit(ringbuf,
+ (ring->status_page.gfx_addr +
+ (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
+ intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance_and_submit(ringbuf);
+
+ return 0;
+}
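
For reference, a standalone sketch of where the seqno store above lands in the status page, assuming the driver's usual dword index (I915_GEM_HWS_INDEX is 0x30, and MI_STORE_DWORD_INDEX_SHIFT converts a dword index to a byte offset) and a made-up status page address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gfx_addr = 0x00100000; /* status page GGTT address (made up) */
	uint32_t hws_index = 0x30;      /* I915_GEM_HWS_INDEX */
	uint32_t addr = gfx_addr + (hws_index << 2); /* dword index -> bytes */

	printf("seqno lands at 0x%08x\n", (unsigned)addr);
	return 0;
}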
+
+/**
+ * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
+ *
+ * @ring: Engine Command Streamer.
+ *
+ */
+void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!intel_ring_initialized(ring))
+ return;
+
+ intel_logical_ring_stop(ring);
+ WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ i915_cmd_parser_fini_ring(ring);
+
+ if (ring->status_page.obj) {
+ kunmap(sg_page(ring->status_page.obj->pages->sgl));
+ ring->status_page.obj = NULL;
+ }
+}
+
+static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+{
+ int ret;
+ struct intel_context *dctx = ring->default_context;
+ struct drm_i915_gem_object *dctx_obj;
+
+ /* Intentionally left blank. */
+ ring->buffer = NULL;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ init_waitqueue_head(&ring->irq_queue);
+
+ INIT_LIST_HEAD(&ring->execlist_queue);
+ spin_lock_init(&ring->execlist_lock);
+ ring->next_context_status_buffer = 0;
+
+ ret = intel_lr_context_deferred_create(dctx, ring);
+ if (ret)
+ return ret;
+
+ /* The status page is offset 0 from the context object in LRCs. */
+ dctx_obj = dctx->engine[ring->id].state;
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
+ ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
+ if (ring->status_page.page_addr == NULL)
+ return -ENOMEM;
+ ring->status_page.obj = dctx_obj;
+
+ ret = i915_cmd_parser_init_ring(ring);
+ if (ret)
+ return ret;
+
+ if (ring->init) {
+ ret = ring->init(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int logical_render_ring_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+ ring->irq_keep_mask =
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+ if (HAS_L3_DPF(dev))
+ ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+ ring->init = gen8_init_render_ring;
+ ring->cleanup = intel_fini_pipe_control;
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ ring->emit_request = gen8_emit_request;
+ ring->emit_flush = gen8_emit_flush_render;
+ ring->irq_get = gen8_logical_ring_get_irq;
+ ring->irq_put = gen8_logical_ring_put_irq;
+ ring->emit_bb_start = gen8_emit_bb_start;
+
+ return logical_ring_init(dev, ring);
+}
+
+static int logical_bsd_ring_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+
+ ring->name = "bsd ring";
+ ring->id = VCS;
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+ ring->irq_keep_mask =
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+
+ ring->init = gen8_init_common_ring;
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ ring->emit_request = gen8_emit_request;
+ ring->emit_flush = gen8_emit_flush;
+ ring->irq_get = gen8_logical_ring_get_irq;
+ ring->irq_put = gen8_logical_ring_put_irq;
+ ring->emit_bb_start = gen8_emit_bb_start;
+
+ return logical_ring_init(dev, ring);
+}
+
+static int logical_bsd2_ring_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+	ring->name = "bsd2 ring";
+ ring->id = VCS2;
+ ring->mmio_base = GEN8_BSD2_RING_BASE;
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+ ring->irq_keep_mask =
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+
+ ring->init = gen8_init_common_ring;
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ ring->emit_request = gen8_emit_request;
+ ring->emit_flush = gen8_emit_flush;
+ ring->irq_get = gen8_logical_ring_get_irq;
+ ring->irq_put = gen8_logical_ring_put_irq;
+ ring->emit_bb_start = gen8_emit_bb_start;
+
+ return logical_ring_init(dev, ring);
+}
+
+static int logical_blt_ring_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+
+ ring->name = "blitter ring";
+ ring->id = BCS;
+ ring->mmio_base = BLT_RING_BASE;
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+ ring->irq_keep_mask =
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+
+ ring->init = gen8_init_common_ring;
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ ring->emit_request = gen8_emit_request;
+ ring->emit_flush = gen8_emit_flush;
+ ring->irq_get = gen8_logical_ring_get_irq;
+ ring->irq_put = gen8_logical_ring_put_irq;
+ ring->emit_bb_start = gen8_emit_bb_start;
+
+ return logical_ring_init(dev, ring);
+}
+
+static int logical_vebox_ring_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+
+ ring->name = "video enhancement ring";
+ ring->id = VECS;
+ ring->mmio_base = VEBOX_RING_BASE;
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+ ring->irq_keep_mask =
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+
+ ring->init = gen8_init_common_ring;
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ ring->emit_request = gen8_emit_request;
+ ring->emit_flush = gen8_emit_flush;
+ ring->irq_get = gen8_logical_ring_get_irq;
+ ring->irq_put = gen8_logical_ring_put_irq;
+ ring->emit_bb_start = gen8_emit_bb_start;
+
+ return logical_ring_init(dev, ring);
+}
+
+/**
+ * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
+ * @dev: DRM device.
+ *
+ * This function inits the engines for an Execlists submission style (the equivalent in the
+ * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
+ * those engines that are present in the hardware.
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_logical_rings_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = logical_render_ring_init(dev);
+ if (ret)
+ return ret;
+
+ if (HAS_BSD(dev)) {
+ ret = logical_bsd_ring_init(dev);
+ if (ret)
+ goto cleanup_render_ring;
+ }
+
+ if (HAS_BLT(dev)) {
+ ret = logical_blt_ring_init(dev);
+ if (ret)
+ goto cleanup_bsd_ring;
+ }
+
+ if (HAS_VEBOX(dev)) {
+ ret = logical_vebox_ring_init(dev);
+ if (ret)
+ goto cleanup_blt_ring;
+ }
+
+ if (HAS_BSD2(dev)) {
+ ret = logical_bsd2_ring_init(dev);
+ if (ret)
+ goto cleanup_vebox_ring;
+ }
+
+ ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+ if (ret)
+ goto cleanup_bsd2_ring;
+
+ return 0;
+
+cleanup_bsd2_ring:
+ intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
+cleanup_vebox_ring:
+ intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
+cleanup_blt_ring:
+ intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
+cleanup_bsd_ring:
+ intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
+cleanup_render_ring:
+ intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+
+ return ret;
+}
+
+static int
+populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
+ struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+{
+ struct drm_i915_gem_object *ring_obj = ringbuf->obj;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct page *page;
+ uint32_t *reg_state;
+ int ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
+ return ret;
+ }
+
+ ret = i915_gem_object_get_pages(ctx_obj);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Could not get object pages\n");
+ return ret;
+ }
+
+ i915_gem_object_pin_pages(ctx_obj);
+
+ /* The second page of the context object contains some fields which must
+ * be set up prior to the first execution. */
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ reg_state = kmap_atomic(page);
+
+ /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
+ * commands followed by (reg, value) pairs. The values we are setting here are
+ * only for the first context restore: on a subsequent save, the GPU will
+ * recreate this batchbuffer with new values (including all the missing
+ * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
+ if (ring->id == RCS)
+ reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
+ else
+ reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
+ reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
+ reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
+ reg_state[CTX_CONTEXT_CONTROL+1] =
+ _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
+ reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
+ reg_state[CTX_RING_HEAD+1] = 0;
+ reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
+ reg_state[CTX_RING_TAIL+1] = 0;
+ reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
+ reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
+ reg_state[CTX_RING_BUFFER_CONTROL+1] =
+ ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
+ reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
+ reg_state[CTX_BB_HEAD_U+1] = 0;
+ reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
+ reg_state[CTX_BB_HEAD_L+1] = 0;
+ reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
+ reg_state[CTX_BB_STATE+1] = (1<<5);
+ reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
+ reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
+ reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
+ reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
+ reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
+ reg_state[CTX_SECOND_BB_STATE+1] = 0;
+ if (ring->id == RCS) {
+ /* TODO: according to BSpec, the register state context
+ * for CHV does not have these. OTOH, these registers do
+ * exist in CHV. I'm waiting for a clarification */
+ reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
+ reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
+ reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
+ reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
+ reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
+ reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+ }
+ reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
+ reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
+ reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
+ reg_state[CTX_CTX_TIMESTAMP+1] = 0;
+ reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
+ reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
+ reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
+ reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
+ reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
+ reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
+ reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
+ reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
+ if (ring->id == RCS) {
+ reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
+ reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
+ reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
+ }
+
+ kunmap_atomic(reg_state);
+
+ ctx_obj->dirty = 1;
+ set_page_dirty(page);
+ i915_gem_object_unpin_pages(ctx_obj);
+
+ return 0;
+}
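
A standalone sketch of the MI_LOAD_REGISTER_IMM layout that populate_lr_context fills in: one header dword announcing N (register, value) pairs, then the pairs themselves (not part of the patch; the two registers shown are examples for a ring at mmio base 0x2000, and the addresses are made up):

#include <stdint.h>
#include <stdio.h>

/* MI_INSTR(0x22, 2*n - 1): opcode in bits 28:23, dword length below */
#define MI_LOAD_REGISTER_IMM(n) ((0x22u << 23) | (2u * (n) - 1))

int main(void)
{
	uint32_t state[5];     /* one header + two (reg, value) pairs */

	state[0] = MI_LOAD_REGISTER_IMM(2);
	state[1] = 0x2030;     /* e.g. RING_TAIL for base 0x2000 */
	state[2] = 0;          /* value loaded on context restore */
	state[3] = 0x2038;     /* e.g. RING_START for base 0x2000 */
	state[4] = 0x00100000; /* ring object GGTT address (made up) */

	printf("LRI header=0x%08x\n", (unsigned)state[0]);
	return 0;
}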
+
+/**
+ * intel_lr_context_free() - free the LRC specific bits of a context
+ * @ctx: the LR context to free.
+ *
+ * The real context freeing is done in i915_gem_context_free: this only
+ * takes care of the bits that are LRC related: the per-engine backing
+ * objects and the logical ringbuffer.
+ */
+void intel_lr_context_free(struct intel_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+
+ if (ctx_obj) {
+ intel_destroy_ringbuffer_obj(ringbuf);
+ kfree(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ drm_gem_object_unreference(&ctx_obj->base);
+ }
+ }
+}
+
+static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+{
+ int ret = 0;
+
+ WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+
+ switch (ring->id) {
+ case RCS:
+ ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+ break;
+ case VCS:
+ case BCS:
+ case VECS:
+ case VCS2:
+ ret = GEN8_LR_CONTEXT_OTHER_SIZE;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * intel_lr_context_deferred_create() - create the LRC specific bits of a context
+ * @ctx: LR context to create.
+ * @ring: engine to be used with the context.
+ *
+ * This function can be called more than once, with different engines, if we plan
+ * to use the context with them. The context backing objects and the ringbuffers
+ * (especially the ringbuffer backing objects) suck a lot of memory up, and that's why
+ * the creation is a deferred call: it's better to make sure first that we need to use
+ * a given ring with the context.
+ *
+ * Return: non-zero on error.
+ */
+int intel_lr_context_deferred_create(struct intel_context *ctx,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *ctx_obj;
+ uint32_t context_size;
+ struct intel_ringbuffer *ringbuf;
+ int ret;
+
+ WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
+ if (ctx->engine[ring->id].state)
+ return 0;
+
+ context_size = round_up(get_lr_context_size(ring), 4096);
+
+ ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
+ if (IS_ERR(ctx_obj)) {
+ ret = PTR_ERR(ctx_obj);
+ DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
+ drm_gem_object_unreference(&ctx_obj->base);
+ return ret;
+ }
+
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf) {
+ DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
+ ring->name);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ drm_gem_object_unreference(&ctx_obj->base);
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ ringbuf->ring = ring;
+ ringbuf->FIXME_lrc_ctx = ctx;
+
+ ringbuf->size = 32 * PAGE_SIZE;
+ ringbuf->effective_size = ringbuf->size;
+ ringbuf->head = 0;
+ ringbuf->tail = 0;
+ ringbuf->space = ringbuf->size;
+ ringbuf->last_retired_head = -1;
+
+ /* TODO: For now we put this in the mappable region so that we can reuse
+ * the existing ringbuffer code which ioremaps it. When we start
+ * creating many contexts, this will no longer work and we must switch
+ * to a kmapish interface.
+ */
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
+ }
+
+ ctx->engine[ring->id].ringbuf = ringbuf;
+ ctx->engine[ring->id].state = ctx_obj;
+
+ return 0;
+
+error:
+ kfree(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ drm_gem_object_unreference(&ctx_obj->base);
+ return ret;
+}
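
As a usage sketch (illustrative, not part of this patch), the deferred-creation entry point is meant to be called lazily on the submission path, right before a context is first used on a given engine:

	/* Illustrative caller: intel_lr_context_deferred_create() returns
	 * immediately if the per-engine state already exists, so calling it
	 * on every submission is safe.
	 */
	static int example_prepare_lr_context(struct intel_context *ctx,
					      struct intel_engine_cs *ring)
	{
		int ret = intel_lr_context_deferred_create(ctx, ring);
		if (ret)
			return ret;

		/* ctx->engine[ring->id].state and .ringbuf are now valid. */
		return 0;
	}
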
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
new file mode 100644
index 000000000000..991d4499fb03
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_LRC_H_
+#define _INTEL_LRC_H_
+
+/* Execlists regs */
+#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
+#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
+#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
+#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
+#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
+
+/* Logical Rings */
+void intel_logical_ring_stop(struct intel_engine_cs *ring);
+void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+int intel_logical_rings_init(struct drm_device *dev);
+
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
+void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
+/**
+ * intel_logical_ring_advance() - advance the ringbuffer tail
+ * @ringbuf: Ringbuffer to advance.
+ *
+ * The tail is only updated in our logical ringbuffer struct.
+ */
+static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
+{
+ ringbuf->tail &= ringbuf->size - 1;
+}
+/**
+ * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
+ * @ringbuf: Ringbuffer to write to.
+ * @data: DWORD to write.
+ */
+static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
+ u32 data)
+{
+ iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+ ringbuf->tail += 4;
+}
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
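
Taken together, begin/emit/advance mirror the legacy ring API; a minimal sketch (illustrative, not part of this patch) of emitting two no-ops into a logical ringbuffer:

	static int example_emit_noops(struct intel_ringbuffer *ringbuf)
	{
		/* Reserve space for two dwords, wrapping/waiting as needed. */
		int ret = intel_logical_ring_begin(ringbuf, 2);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_advance(ringbuf);
		return 0;
	}
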
+
+/* Logical Ring Contexts */
+void intel_lr_context_free(struct intel_context *ctx);
+int intel_lr_context_deferred_create(struct intel_context *ctx,
+ struct intel_engine_cs *ring);
+
+/* Execlists */
+int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags);
+u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
+
+/**
+ * struct intel_ctx_submit_request - queued context submission request
+ * @ctx: Context to submit to the ELSP.
+ * @ring: Engine to submit it to.
+ * @tail: how far into the context's ringbuffer this request goes.
+ * @execlist_link: link in the submission queue.
+ * @work: workqueue for processing this request in a bottom half.
+ * @elsp_submitted: no. of times this request has been sent to the ELSP.
+ *
+ * The ELSP only accepts two elements at a time, so we queue context/tail
+ * pairs on a given queue (ring->execlist_queue) until the hardware is
+ * available. The queue serves a double purpose: we also use it to keep track
+ * of the up to 2 contexts currently in the hardware (usually one in execution
+ * and the other queued up by the GPU); we only remove elements from the head
+ * of the queue when the hardware informs us that an element has been
+ * completed.
+ *
+ * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
+ */
+struct intel_ctx_submit_request {
+ struct intel_context *ctx;
+ struct intel_engine_cs *ring;
+ u32 tail;
+
+ struct list_head execlist_link;
+ struct work_struct work;
+
+ int elsp_submitted;
+};
+
+void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+
+#endif /* _INTEL_LRC_H_ */
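
As a sketch of the queueing scheme described above (allocation policy and the actual ELSP write are elided; the helper name is illustrative):

	static void example_queue_for_elsp(struct intel_engine_cs *ring,
					   struct intel_context *ctx, u32 tail)
	{
		struct intel_ctx_submit_request *req;
		unsigned long flags;

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (WARN_ON(!req))
			return;
		req->ctx = ctx;
		req->ring = ring;
		req->tail = tail;

		spin_lock_irqsave(&ring->execlist_lock, flags);
		list_add_tail(&req->execlist_link, &ring->execlist_queue);
		/* The real submission path writes the ELSP here whenever
		 * fewer than two contexts are in flight. */
		spin_unlock_irqrestore(&ring->execlist_lock, flags);
	}
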
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 881361c0f27e..1987491723a5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -823,8 +823,7 @@ bool intel_is_dual_link_lvds(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_lvds_encoder *lvds_encoder;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS) {
lvds_encoder = to_lvds_encoder(&encoder->base);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3f88f29a98c0..c8f744c418f0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -309,6 +309,9 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+ if (dev_priv->fbc.false_color)
+ dpfc_ctl |= FBC_CTL_FALSE_COLOR;
+
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_IVYBRIDGE(dev)) {
@@ -1268,34 +1271,27 @@ static bool g4x_compute_srwm(struct drm_device *dev,
display, cursor);
}
-static bool vlv_compute_drain_latency(struct drm_device *dev,
- int plane,
- int *plane_prec_mult,
- int *plane_dl,
- int *cursor_prec_mult,
- int *cursor_dl)
+static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
+ int pixel_size,
+ int *prec_mult,
+ int *drain_latency)
{
- struct drm_crtc *crtc;
- int clock, pixel_size;
int entries;
+ int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (!intel_crtc_active(crtc))
+ if (WARN(clock == 0, "Pixel clock is zero!\n"))
return false;
- clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
+ if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
+ return false;
- entries = (clock / 1000) * pixel_size;
- *plane_prec_mult = (entries > 256) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
- *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
- pixel_size);
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+ *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
+ DRAIN_LATENCY_PRECISION_32;
+ *drain_latency = (64 * (*prec_mult) * 4) / entries;
- entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
- *cursor_prec_mult = (entries > 256) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
- *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+ if (*drain_latency > DRAIN_LATENCY_MASK)
+ *drain_latency = DRAIN_LATENCY_MASK;
return true;
}
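
As a worked example (reading DRAIN_LATENCY_PRECISION_64 as the literal value 64): with a 148500 kHz pixel clock and a 4-byte pixel, entries = DIV_ROUND_UP(148500, 1000) * 4 = 596, which exceeds 128, so the 64x precision multiplier is selected and *drain_latency = (64 * 64 * 4) / 596 = 27 after integer division, subject to the final DRAIN_LATENCY_MASK clamp.
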
@@ -1308,39 +1304,48 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
* latency value.
*/
-static void vlv_update_drain_latency(struct drm_device *dev)
+static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_prec, planea_dl, planeb_prec, planeb_dl;
- int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
- int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
- either 16 or 32 */
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pixel_size;
+ int drain_latency;
+ enum pipe pipe = intel_crtc->pipe;
+ int plane_prec, prec_mult, plane_dl;
- /* For plane A, Cursor A */
- if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
- &cursor_prec_mult, &cursora_dl)) {
- cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
- planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+ plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
+ DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
+ (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
+
+ if (!intel_crtc_active(crtc)) {
+ I915_WRITE(VLV_DDL(pipe), plane_dl);
+ return;
+ }
- I915_WRITE(VLV_DDL1, cursora_prec |
- (cursora_dl << DDL_CURSORA_SHIFT) |
- planea_prec | planea_dl);
+ /* Primary plane Drain Latency */
+ pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
+ if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
+ plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
+ DDL_PLANE_PRECISION_64 :
+ DDL_PLANE_PRECISION_32;
+ plane_dl |= plane_prec | drain_latency;
}
- /* For plane B, Cursor B */
- if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
- &cursor_prec_mult, &cursorb_dl)) {
- cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
- planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+ /* Cursor Drain Latency
+ * BPP is always 4 for cursor
+ */
+ pixel_size = 4;
- I915_WRITE(VLV_DDL2, cursorb_prec |
- (cursorb_dl << DDL_CURSORB_SHIFT) |
- planeb_prec | planeb_dl);
+ /* Program cursor DL only if it is enabled */
+ if (intel_crtc->cursor_base &&
+ vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
+ plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
+ DDL_CURSOR_PRECISION_64 :
+ DDL_CURSOR_PRECISION_32;
+ plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
}
+
+ I915_WRITE(VLV_DDL(pipe), plane_dl);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1356,7 +1361,73 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
unsigned int enabled = 0;
bool cxsr_enabled;
- vlv_update_drain_latency(dev);
+ vlv_update_drain_latency(crtc);
+
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1 << PIPE_A;
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 1 << PIPE_B;
+
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
+ cxsr_enabled = true;
+ } else {
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
+ plane_sr = cursor_sr = 0;
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
+ "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ (planea_wm << DSPFW_PLANEA_SHIFT));
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
+}
+
+static void cherryview_update_wm(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, planec_wm;
+ int cursora_wm, cursorb_wm, cursorc_wm;
+ int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
+ unsigned int enabled = 0;
+ bool cxsr_enabled;
+
+ vlv_update_drain_latency(crtc);
if (g4x_compute_wm0(dev, PIPE_A,
&valleyview_wm_info, latency_ns,
@@ -1370,6 +1441,12 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
+ if (g4x_compute_wm0(dev, PIPE_C,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planec_wm, &cursorc_wm))
+ enabled |= 1 << PIPE_C;
+
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
@@ -1388,27 +1465,66 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
plane_sr = cursor_sr = 0;
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
+ "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
+ "SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
+ planec_wm, cursorc_wm,
plane_sr, cursor_sr);
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
- planea_wm);
+ (planea_wm << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ I915_WRITE(DSPFW9_CHV,
+ (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
+ DSPFW_CURSORC_MASK)) |
+ (planec_wm << DSPFW_PLANEC_SHIFT) |
+ (cursorc_wm << DSPFW_CURSORC_SHIFT));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
+static void valleyview_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
+ bool enabled, bool scaled)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_plane(plane)->pipe;
+ int sprite = to_intel_plane(plane)->plane;
+ int drain_latency;
+ int plane_prec;
+ int sprite_dl;
+ int prec_mult;
+
+ sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
+ (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
+
+ if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
+ &drain_latency)) {
+ plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
+ DDL_SPRITE_PRECISION_64(sprite) :
+ DDL_SPRITE_PRECISION_32(sprite);
+ sprite_dl |= plane_prec |
+ (drain_latency << DDL_SPRITE_SHIFT(sprite));
+ }
+
+ I915_WRITE(VLV_DDL(pipe), sprite_dl);
+}
+
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1444,7 +1560,8 @@ static void g4x_update_wm(struct drm_crtc *crtc)
plane_sr = cursor_sr = 0;
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
+ "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
@@ -1453,7 +1570,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
- planea_wm);
+ (planea_wm << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
@@ -1527,8 +1644,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
/* 965 has limitations... */
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << 16) | (8 << 8) | (8 << 0));
- I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ (8 << DSPFW_CURSORB_SHIFT) |
+ (8 << DSPFW_PLANEB_SHIFT) |
+ (8 << DSPFW_PLANEA_SHIFT));
+ I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
+ (8 << DSPFW_PLANEC_SHIFT_OLD));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
@@ -3034,7 +3154,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
I915_READ(0x112e0);
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
dev_priv->ips.last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->ips.last_time2);
+ dev_priv->ips.last_time2 = ktime_get_raw_ns();
spin_unlock_irq(&mchdev_lock);
}
@@ -3420,10 +3540,10 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
else
mode = 0;
}
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3447,8 +3567,8 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
mask = INTEL_RC6_ENABLE;
if ((enable_rc6 & mask) != enable_rc6)
- DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
- enable_rc6 & mask, enable_rc6, mask);
+ DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+ enable_rc6 & mask, enable_rc6, mask);
return enable_rc6 & mask;
}
@@ -3599,7 +3719,6 @@ static void gen6_enable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
u32 rp_state_cap;
- u32 gt_perf_status;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -3624,7 +3743,6 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
parse_rp_state_cap(dev_priv, rp_state_cap);
@@ -4595,18 +4713,16 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- struct timespec now, diff1;
- u64 diff;
- unsigned long diffms;
+ u64 now, diff, diffms;
u32 count;
assert_spin_locked(&mchdev_lock);
- getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->ips.last_time2);
+ now = ktime_get_raw_ns();
+ diffms = now - dev_priv->ips.last_time2;
+ do_div(diffms, NSEC_PER_MSEC);
/* Don't divide by 0 */
- diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
if (!diffms)
return;
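
(do_div() is the kernel's divide-in-place idiom: it replaces its 64-bit first argument with the quotient and returns the remainder, which is why diffms holds the raw nanosecond delta before the call and whole milliseconds after it.)
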
@@ -5230,11 +5346,9 @@ static void gen6_check_mch_setup(struct drm_device *dev)
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
- if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
- DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
- DRM_INFO("This can cause pipe underruns and display issues.\n");
- DRM_INFO("Please upgrade your BIOS to fix this.\n");
- }
+ if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
+ tmp);
}
static void gen6_init_clock_gating(struct drm_device *dev)
@@ -6257,6 +6371,153 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, false);
}
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ } else {
+ phy = DPIO_PHY1;
+ I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ }
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+		DRM_ERROR("Display PHY %d is not powered up\n", phy);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
+ PHY_COM_LANE_RESET_DEASSERT(phy));
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
+ ~PHY_COM_LANE_RESET_DEASSERT(phy));
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = power_well->data;
+ bool enabled;
+ u32 state, ctrl;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
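+	/* The DP_SSC (control) field sits 16 bits below the DP_SSS (status)
+	 * field, so a consistent punit state must satisfy ctrl << 16 == state.
+	 */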
+ WARN_ON(ctrl << 16 != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = power_well->data;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+
+ if (wait_for(COND, 100))
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
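
(The file-local COND macro keeps the fast-path check and the wait_for() polling condition textually identical, and is #undef'd straight after use so the name cannot leak into later code.)
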
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -6448,6 +6709,39 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
+#define CHV_PIPE_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_always_on_power_well_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -6455,6 +6749,20 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.is_enabled = i9xx_always_on_power_well_enabled,
};
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = chv_pipe_power_well_sync_hw,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
static struct i915_power_well i9xx_always_on_power_well[] = {
{
.name = "always-on",
@@ -6577,6 +6885,107 @@ static struct i915_power_well vlv_power_wells[] = {
},
};
+static struct i915_power_well chv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+#if 0
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+ {
+ .name = "pipe-a",
+ .domains = CHV_PIPE_A_POWER_DOMAINS,
+ .data = PIPE_A,
+ .ops = &chv_pipe_power_well_ops,
+ },
+ {
+ .name = "pipe-b",
+ .domains = CHV_PIPE_B_POWER_DOMAINS,
+ .data = PIPE_B,
+ .ops = &chv_pipe_power_well_ops,
+ },
+ {
+ .name = "pipe-c",
+ .domains = CHV_PIPE_C_POWER_DOMAINS,
+ .data = PIPE_C,
+ .ops = &chv_pipe_power_well_ops,
+ },
+#endif
+ {
+ .name = "dpio-common-bc",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+ {
+ .name = "dpio-common-d",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+#if 0
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-tx-d-01",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
+ },
+ {
+ .name = "dpio-tx-d-23",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
+ },
+#endif
+};
+
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
enum punit_power_well power_well_id)
{
@@ -6613,6 +7022,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, vlv_power_wells);
} else {
@@ -6840,11 +7251,13 @@ void intel_init_pm(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
- dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.update_wm = cherryview_update_wm;
+ dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b3d8f766fa7f..4fb1ec95ec08 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,14 +33,24 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
- * but keeps the logic simple. Indeed, the whole purpose of this macro is just
- * to give some inclination as to some of the magic values used in the various
- * workarounds!
- */
-#define CACHELINE_BYTES 64
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!dev)
+ return false;
-static inline int __ring_space(int head, int tail, int size)
+ if (i915.enable_execlists) {
+ struct intel_context *dctx = ring->default_context;
+ struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+
+ return ringbuf->obj;
+ } else
+ return ring->buffer && ring->buffer->obj;
+}
+
+int __intel_ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
@@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}
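
As a worked example with a 4096-byte ring, head = 512 and tail = 3584 (taking the free-space reserve I915_RING_FREE_SPACE as this driver's 64 bytes): 512 - (3584 + 64) is negative, so the ring size is added back, giving 4096 + 512 - 3584 - 64 = 960 usable bytes.
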
-static inline int ring_space(struct intel_ringbuffer *ringbuf)
+int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
- return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+ return __intel_ring_space(ringbuf->head & HEAD_ADDR,
+ ringbuf->tail, ringbuf->size);
}
-static bool intel_ring_stopped(struct intel_engine_cs *ring)
+bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
@@ -380,6 +391,27 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
}
static int
+gen8_emit_pipe_control(struct intel_engine_cs *ring,
+ u32 flags, u32 scratch_addr)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
@@ -402,22 +434,17 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
- }
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
-
- return 0;
+ /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
+ ret = gen8_emit_pipe_control(ring,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD,
+ 0);
+ if (ret)
+ return ret;
+ }
+ return gen8_emit_pipe_control(ring, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -460,9 +487,14 @@ static bool stop_ring(struct intel_engine_cs *ring)
if (!IS_GEN2(ring->dev)) {
I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
- DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
- return false;
+ if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
+ /* Sometimes we observe that the idle flag is not
+ * set even though the ring is empty. So double
+ * check before giving up.
+ */
+ if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+ return false;
}
}
@@ -516,6 +548,9 @@ static int init_ring_common(struct intel_engine_cs *ring)
else
ring_setup_phys_status_page(ring);
+ /* Enforce ordering by reading HEAD register back */
+ I915_READ_HEAD(ring);
+
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
@@ -544,7 +579,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ring_space(ringbuf);
+ ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;
}
@@ -556,8 +591,25 @@ out:
return ret;
}
-static int
-init_pipe_control(struct intel_engine_cs *ring)
+void
+intel_fini_pipe_control(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (ring->scratch.obj == NULL)
+ return;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ kunmap(sg_page(ring->scratch.obj->pages->sgl));
+ i915_gem_object_ggtt_unpin(ring->scratch.obj);
+ }
+
+ drm_gem_object_unreference(&ring->scratch.obj->base);
+ ring->scratch.obj = NULL;
+}
+
+int
+intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
@@ -632,7 +684,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
- ret = init_pipe_control(ring);
+ ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
@@ -667,16 +719,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
dev_priv->semaphore_obj = NULL;
}
- if (ring->scratch.obj == NULL)
- return;
-
- if (INTEL_INFO(dev)->gen >= 5) {
- kunmap(sg_page(ring->scratch.obj->pages->sgl));
- i915_gem_object_ggtt_unpin(ring->scratch.obj);
- }
-
- drm_gem_object_unreference(&ring->scratch.obj->base);
- ring->scratch.obj = NULL;
+ intel_fini_pipe_control(ring);
}
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
@@ -1495,7 +1538,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (!ringbuf->obj)
return;
@@ -1506,8 +1549,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
ringbuf->obj = NULL;
}
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
- struct intel_ringbuffer *ringbuf)
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
@@ -1569,7 +1612,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->execlist_queue);
ringbuf->size = 32 * PAGE_SIZE;
+ ringbuf->ring = ring;
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
init_waitqueue_head(&ring->irq_queue);
@@ -1652,13 +1697,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ringbuf);
+ ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
list_for_each_entry(request, &ring->request_list, list) {
- if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
+ if (__intel_ring_space(request->tail, ringbuf->tail,
+ ringbuf->size) >= n) {
seqno = request->seqno;
break;
}
@@ -1675,7 +1721,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ringbuf);
+ ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
@@ -1704,7 +1750,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = ring_space(ringbuf);
+ ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
@@ -1756,7 +1802,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = ring_space(ringbuf);
+ ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
@@ -1961,9 +2007,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
- !(flags & I915_DISPATCH_SECURE);
+ bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_ring_begin(ring, 4);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ed5941078f92..9cbf7b0ebc99 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,13 @@
#define I915_CMD_HASH_ORDER 9
+/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some inclination as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -70,6 +77,7 @@ enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
+ HANGCHECK_ACTIVE_LOOP,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
@@ -78,6 +86,7 @@ enum intel_ring_hangcheck_action {
struct intel_ring_hangcheck {
u64 acthd;
+ u64 max_acthd;
u32 seqno;
int score;
enum intel_ring_hangcheck_action action;
@@ -88,6 +97,15 @@ struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
+ struct intel_engine_cs *ring;
+
+ /*
+ * FIXME: This backpointer is an artifact of the history of how the
+ * execlist patches came into being. It will get removed once the basic
+ * code has landed.
+ */
+ struct intel_context *FIXME_lrc_ctx;
+
u32 head;
u32 tail;
int space;
@@ -212,6 +230,18 @@ struct intel_engine_cs {
unsigned int num_dwords);
} semaphore;
+ /* Execlists */
+ spinlock_t execlist_lock;
+ struct list_head execlist_queue;
+ u8 next_context_status_buffer;
+ u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
+ int (*emit_request)(struct intel_ringbuffer *ringbuf);
+ int (*emit_flush)(struct intel_ringbuffer *ringbuf,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
+ u64 offset, unsigned flags);
+
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@@ -285,11 +315,7 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
-{
- return ring->buffer && ring->buffer->obj;
-}
+bool intel_ring_initialized(struct intel_engine_cs *ring);
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
@@ -353,6 +379,10 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf);
+
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
@@ -370,6 +400,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
+int __intel_ring_space(int head, int tail, int size);
+int intel_ring_space(struct intel_ringbuffer *ringbuf);
+bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
@@ -377,6 +410,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+void intel_fini_pipe_control(struct intel_engine_cs *ring);
+int intel_init_pipe_control(struct intel_engine_cs *ring);
+
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 168c6652cda1..0bdb00b7c59c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -53,6 +53,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
@@ -81,7 +82,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
* other CPUs can see the task state update by the time we
* read the scanline.
*/
- prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
scanline = intel_get_crtc_scanline(crtc);
if (scanline < min || scanline > max)
@@ -100,7 +101,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
local_irq_disable();
}
- finish_wait(&crtc->vbl_wait, &wait);
+ finish_wait(wq, &wait);
drm_vblank_put(dev, pipe);
@@ -163,6 +164,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
sprctl &= ~SP_PIXFORMAT_MASK;
sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
sprctl &= ~SP_TILED;
+ sprctl &= ~SP_ROTATE_180;
switch (fb->pixel_format) {
case DRM_FORMAT_YUYV:
@@ -235,6 +237,14 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ sprctl |= SP_ROTATE_180;
+
+ x += src_w;
+ y += src_h;
+ linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+ }
+
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
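
The 180 degree case works by moving the surface offset past the bottom-right corner of the source rectangle, since the rotated scanout then walks the buffer backwards: for a 1920x1080 source at 4 bytes per pixel with a 7680-byte stride, linear_offset grows by 1080 * 7680 + 1920 * 4 = 8302080 bytes, roughly one full frame.
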
@@ -364,6 +374,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
sprctl &= ~SPRITE_TILED;
+ sprctl &= ~SPRITE_ROTATE_180;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -426,6 +437,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ sprctl |= SPRITE_ROTATE_180;
+
+		/* HSW and BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+ x += src_w;
+ y += src_h;
+ linear_offset += src_h * fb->pitches[0] +
+ src_w * pixel_size;
+ }
+ }
+
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -571,6 +594,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dvscntr &= ~DVS_RGB_ORDER_XBGR;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
dvscntr &= ~DVS_TILED;
+ dvscntr &= ~DVS_ROTATE_180;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -628,6 +652,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
+ if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ dvscntr |= DVS_ROTATE_180;
+
+ x += src_w;
+ y += src_h;
+ linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+ }
+
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_update_primary_plane(intel_crtc);
@@ -895,6 +927,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+ drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
+ intel_plane->rotation);
+
hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
BUG_ON(hscale < 0);
@@ -933,6 +968,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
drm_rect_width(&dst) * hscale - drm_rect_width(&src),
drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+ drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
+ intel_plane->rotation);
+
/* sanity check to make sure the src viewport wasn't enlarged */
WARN_ON(src.x1 < (int) src_x ||
src.y1 < (int) src_y ||
@@ -1180,18 +1218,42 @@ out_unlock:
return ret;
}
-void intel_plane_restore(struct drm_plane *plane)
+static int intel_plane_set_property(struct drm_plane *plane,
+ struct drm_property *prop,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ uint64_t old_val;
+ int ret = -ENOENT;
+
+ if (prop == dev->mode_config.rotation_property) {
+ /* exactly one rotation angle please */
+ if (hweight32(val & 0xf) != 1)
+ return -EINVAL;
+
+ old_val = intel_plane->rotation;
+ intel_plane->rotation = val;
+ ret = intel_plane_restore(plane);
+ if (ret)
+ intel_plane->rotation = old_val;
+ }
+
+ return ret;
+}
+
+int intel_plane_restore(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
if (!plane->crtc || !plane->fb)
- return;
+ return 0;
- intel_update_plane(plane, plane->crtc, plane->fb,
- intel_plane->crtc_x, intel_plane->crtc_y,
- intel_plane->crtc_w, intel_plane->crtc_h,
- intel_plane->src_x, intel_plane->src_y,
- intel_plane->src_w, intel_plane->src_h);
+ return intel_update_plane(plane, plane->crtc, plane->fb,
+ intel_plane->crtc_x, intel_plane->crtc_y,
+ intel_plane->crtc_w, intel_plane->crtc_h,
+ intel_plane->src_x, intel_plane->src_y,
+ intel_plane->src_w, intel_plane->src_h);
}
void intel_plane_disable(struct drm_plane *plane)
@@ -1206,6 +1268,7 @@ static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
.destroy = intel_destroy_plane,
+ .set_property = intel_plane_set_property,
};
static uint32_t ilk_plane_formats[] = {
@@ -1310,13 +1373,28 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe;
intel_plane->plane = plane;
+ intel_plane->rotation = BIT(DRM_ROTATE_0);
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
plane_formats, num_plane_formats,
false);
- if (ret)
+ if (ret) {
kfree(intel_plane);
+ goto out;
+ }
+
+ if (!dev->mode_config.rotation_property)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_180));
+
+ if (dev->mode_config.rotation_property)
+ drm_object_attach_property(&intel_plane->base.base,
+ dev->mode_config.rotation_property,
+ intel_plane->rotation);
+ out:
return ret;
}
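
From userspace the new rotation property is driven through the standard DRM property interface; a minimal libdrm sketch (plane_id/prop_id discovery via drmModeGetPlaneResources() and drmModeObjectGetProperties() is elided, and the value assumes the standard rotation bitmask with DRM_ROTATE_180 at bit 2):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Illustrative: request 180 degree plane rotation. Exactly one
	 * rotation bit may be set, as intel_plane_set_property() enforces.
	 */
	int example_set_rotation_180(int fd, uint32_t plane_id, uint32_t prop_id)
	{
		return drmModeObjectSetProperty(fd, plane_id,
						DRM_MODE_OBJECT_PLANE,
						prop_id, (uint64_t)1 << 2);
	}
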
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index e211eef4b7e4..32186a656816 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
+ drm_modeset_acquire_init(&ctx, 0);
+
if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp, &ctx);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
return connector_status_unknown;
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
} else
return connector->status;
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index c3bf059ba720..37d80c122483 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -502,31 +502,31 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
return err;
}
- /* Make drm_addbufs happy by not trying to create a mapping for less
- * than a page.
+ /* Make drm_legacy_addbufs happy by not trying to create a mapping for
+ * less than a page.
*/
if (warp_size < PAGE_SIZE)
warp_size = PAGE_SIZE;
offset = 0;
- err = drm_addmap(dev, offset, warp_size,
- _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
+ err = drm_legacy_addmap(dev, offset, warp_size,
+ _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
if (err) {
DRM_ERROR("Unable to map WARP microcode: %d\n", err);
return err;
}
offset += warp_size;
- err = drm_addmap(dev, offset, dma_bs->primary_size,
- _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
+ err = drm_legacy_addmap(dev, offset, dma_bs->primary_size,
+ _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
if (err) {
DRM_ERROR("Unable to map primary DMA region: %d\n", err);
return err;
}
offset += dma_bs->primary_size;
- err = drm_addmap(dev, offset, secondary_size,
- _DRM_AGP, 0, &dev->agp_buffer_map);
+ err = drm_legacy_addmap(dev, offset, secondary_size,
+ _DRM_AGP, 0, &dev->agp_buffer_map);
if (err) {
DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
return err;
@@ -538,7 +538,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
req.flags = _DRM_AGP_BUFFER;
req.agp_start = offset;
- err = drm_addbufs_agp(dev, &req);
+ err = drm_legacy_addbufs_agp(dev, &req);
if (err) {
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
@@ -559,8 +559,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
}
offset += secondary_size;
- err = drm_addmap(dev, offset, agp_size - offset,
- _DRM_AGP, 0, &dev_priv->agp_textures);
+ err = drm_legacy_addmap(dev, offset, agp_size - offset,
+ _DRM_AGP, 0, &dev_priv->agp_textures);
if (err) {
DRM_ERROR("Unable to map AGP texture region %d\n", err);
return err;
@@ -602,7 +602,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
*
* \todo
* Determine whether the maximum address passed to drm_pci_alloc is correct.
- * The same goes for drm_addbufs_pci.
+ * The same goes for drm_legacy_addbufs_pci.
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
@@ -622,15 +622,15 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
return -EFAULT;
}
- /* Make drm_addbufs happy by not trying to create a mapping for less
- * than a page.
+ /* Make drm_legacy_addbufs happy by not trying to create a mapping for
+ * less than a page.
*/
if (warp_size < PAGE_SIZE)
warp_size = PAGE_SIZE;
/* The proper alignment is 0x100 for this mapping */
- err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
- _DRM_READ_ONLY, &dev_priv->warp);
+ err = drm_legacy_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
+ _DRM_READ_ONLY, &dev_priv->warp);
if (err != 0) {
DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
err);
@@ -645,8 +645,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
for (primary_size = dma_bs->primary_size; primary_size != 0;
primary_size >>= 1) {
/* The proper alignment for this mapping is 0x04 */
- err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
- _DRM_READ_ONLY, &dev_priv->primary);
+ err = drm_legacy_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
+ _DRM_READ_ONLY, &dev_priv->primary);
if (!err)
break;
}
@@ -669,7 +669,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
req.count = bin_count;
req.size = dma_bs->secondary_bin_size;
- err = drm_addbufs_pci(dev, &req);
+ err = drm_legacy_addbufs_pci(dev, &req);
if (!err)
break;
}
@@ -708,15 +708,16 @@ static int mga_do_dma_bootstrap(struct drm_device *dev,
/* The first steps are the same for both PCI and AGP based DMA. Map
* the cards MMIO registers and map a status page.
*/
- err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
- _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
+ err = drm_legacy_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
+ _DRM_REGISTERS, _DRM_READ_ONLY,
+ &dev_priv->mmio);
if (err) {
DRM_ERROR("Unable to map MMIO region: %d\n", err);
return err;
}
- err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
- _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
+ err = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+ _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
&dev_priv->status);
if (err) {
DRM_ERROR("Unable to map status region: %d\n", err);
@@ -809,7 +810,7 @@ static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
dev_priv->texture_offset = init->texture_offset[0];
dev_priv->texture_size = init->texture_size[0];
- dev_priv->sarea = drm_getsarea(dev);
+ dev_priv->sarea = drm_legacy_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("failed to find sarea!\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 6b1a87c8aac5..cb5c71f4b28e 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -64,6 +64,7 @@ static struct drm_driver driver = {
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,
+ .set_busid = drm_pci_set_busid,
.dma_quiescent = mga_driver_dma_quiescent,
.device_is_agp = mga_driver_device_is_agp,
.get_vblank_counter = mga_get_vblank_counter,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index f15ea3c4a90a..97745991544d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -28,7 +28,7 @@ module_param_named(modeset, mgag200_modeset, int, 0400);
static struct drm_driver driver;
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+static const struct pci_device_id pciidlist[] = {
{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
@@ -91,6 +91,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = mgag200_driver_load,
.unload = mgag200_driver_unload,
+ .set_busid = drm_pci_set_busid,
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 80de23d9b9c9..2e2b76aa4e17 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -224,7 +224,7 @@ struct mgag200_bo {
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
- u32 placements[3];
+ struct ttm_place placements[3];
int pin_count;
};
#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 45f04dea0ac2..83485ab81ce8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1483,11 +1483,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct mga_device *mdev = (struct mga_device*)dev->dev_private;
- struct mga_fbdev *mfbdev = mdev->mfbdev;
- struct drm_fb_helper *fb_helper = &mfbdev->helper;
- struct drm_fb_helper_connector *fb_helper_conn = NULL;
int bpp = 32;
- int i = 0;
if (IS_G200_SE(mdev)) {
if (mdev->unique_rev_id == 0x01) {
@@ -1537,21 +1533,14 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
}
/* Validate the mode input by the user */
- for (i = 0; i < fb_helper->connector_count; i++) {
- if (fb_helper->connector_info[i]->connector == connector) {
- /* Found the helper for this connector */
- fb_helper_conn = fb_helper->connector_info[i];
- if (fb_helper_conn->cmdline_mode.specified) {
- if (fb_helper_conn->cmdline_mode.bpp_specified) {
- bpp = fb_helper_conn->cmdline_mode.bpp;
- }
- }
- }
+ if (connector->cmdline_mode.specified) {
+ if (connector->cmdline_mode.bpp_specified)
+ bpp = connector->cmdline_mode.bpp;
}
if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
- if (fb_helper_conn)
- fb_helper_conn->cmdline_mode.specified = false;
+ if (connector->cmdline_mode.specified)
+ connector->cmdline_mode.specified = false;
return MODE_BAD;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 5a00e90696de..be883ef5a1d3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -293,18 +293,22 @@ void mgag200_mm_fini(struct mga_device *mdev)
void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
{
u32 c = 0;
- bo->placement.fpfn = 0;
- bo->placement.lpfn = 0;
+ unsigned i;
+
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!c)
- bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placement.num_placement = c;
bo->placement.num_busy_placement = c;
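+ /* each ttm_place now carries its own pfn range; zero means no limit */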
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
}
int mgag200_bo_create(struct drm_device *dev, int size, int align,
@@ -361,7 +365,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
mgag200_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -384,7 +388,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -408,7 +412,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b447c01ad89c..47ccdbf49fa1 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -836,6 +836,7 @@ static struct drm_driver msm_driver = {
.open = msm_open,
.preclose = msm_preclose,
.lastclose = msm_lastclose,
+ .set_busid = drm_platform_set_busid,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 637c29a33127..40afc69a3778 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,5 +1,5 @@
config DRM_NOUVEAU
- tristate "Nouveau (nVidia) cards"
+ tristate "Nouveau (NVIDIA) cards"
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
@@ -23,7 +23,15 @@ config DRM_NOUVEAU
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
help
- Choose this option for open-source nVidia support.
+ Choose this option for open-source NVIDIA support.
+
+config NOUVEAU_PLATFORM_DRIVER
+ tristate "Nouveau (NVIDIA) SoC GPUs"
+ depends on DRM_NOUVEAU && ARCH_TEGRA
+ default y
+ help
+ Support for the Nouveau platform driver, used for SoC GPUs such as
+ those found on NVIDIA Tegra K1.
config NOUVEAU_DEBUG
int "Maximum debug level"
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 8b307e143632..f5d7f7ce4bc6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -14,8 +14,10 @@ nouveau-y += core/core/enum.o
nouveau-y += core/core/event.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
+nouveau-y += core/core/ioctl.o
nouveau-y += core/core/mm.o
nouveau-y += core/core/namedb.o
+nouveau-y += core/core/notify.o
nouveau-y += core/core/object.o
nouveau-y += core/core/option.o
nouveau-y += core/core/parent.o
@@ -26,6 +28,7 @@ nouveau-y += core/core/subdev.o
nouveau-y += core/subdev/bar/base.o
nouveau-y += core/subdev/bar/nv50.o
nouveau-y += core/subdev/bar/nvc0.o
+nouveau-y += core/subdev/bar/gk20a.o
nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/boost.o
@@ -64,6 +67,7 @@ nouveau-y += core/subdev/clock/nva3.o
nouveau-y += core/subdev/clock/nvaa.o
nouveau-y += core/subdev/clock/nvc0.o
nouveau-y += core/subdev/clock/nve0.o
+nouveau-y += core/subdev/clock/gk20a.o
nouveau-y += core/subdev/clock/pllnv04.o
nouveau-y += core/subdev/clock/pllnva3.o
nouveau-y += core/subdev/devinit/base.o
@@ -149,8 +153,10 @@ nouveau-y += core/subdev/instmem/base.o
nouveau-y += core/subdev/instmem/nv04.o
nouveau-y += core/subdev/instmem/nv40.o
nouveau-y += core/subdev/instmem/nv50.o
-nouveau-y += core/subdev/ltcg/gf100.o
-nouveau-y += core/subdev/ltcg/gm107.o
+nouveau-y += core/subdev/ltc/base.o
+nouveau-y += core/subdev/ltc/gf100.o
+nouveau-y += core/subdev/ltc/gk104.o
+nouveau-y += core/subdev/ltc/gm107.o
nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
@@ -161,6 +167,7 @@ nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o
nouveau-y += core/subdev/mc/nvc0.o
nouveau-y += core/subdev/mc/nvc3.o
+nouveau-y += core/subdev/mc/gk20a.o
nouveau-y += core/subdev/mxm/base.o
nouveau-y += core/subdev/mxm/mxms.o
nouveau-y += core/subdev/mxm/nv50.o
@@ -169,6 +176,7 @@ nouveau-y += core/subdev/pwr/memx.o
nouveau-y += core/subdev/pwr/nva3.o
nouveau-y += core/subdev/pwr/nvc0.o
nouveau-y += core/subdev/pwr/nvd0.o
+nouveau-y += core/subdev/pwr/gk104.o
nouveau-y += core/subdev/pwr/nv108.o
nouveau-y += core/subdev/therm/base.o
nouveau-y += core/subdev/therm/fan.o
@@ -211,6 +219,7 @@ nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/device/acpi.o
nouveau-y += core/engine/device/base.o
nouveau-y += core/engine/device/ctrl.o
nouveau-y += core/engine/device/nv04.o
@@ -270,6 +279,7 @@ nouveau-y += core/engine/graph/ctxnvd9.o
nouveau-y += core/engine/graph/ctxnve4.o
nouveau-y += core/engine/graph/ctxgk20a.o
nouveau-y += core/engine/graph/ctxnvf0.o
+nouveau-y += core/engine/graph/ctxgk110b.o
nouveau-y += core/engine/graph/ctxnv108.o
nouveau-y += core/engine/graph/ctxgm107.o
nouveau-y += core/engine/graph/nv04.o
@@ -291,6 +301,7 @@ nouveau-y += core/engine/graph/nvd9.o
nouveau-y += core/engine/graph/nve4.o
nouveau-y += core/engine/graph/gk20a.o
nouveau-y += core/engine/graph/nvf0.o
+nouveau-y += core/engine/graph/gk110b.o
nouveau-y += core/engine/graph/nv108.o
nouveau-y += core/engine/graph/gm107.o
nouveau-y += core/engine/mpeg/nv31.o
@@ -318,11 +329,18 @@ nouveau-y += core/engine/vp/nv98.o
nouveau-y += core/engine/vp/nvc0.o
nouveau-y += core/engine/vp/nve0.o
+# nvif
+nouveau-y += nvif/object.o
+nouveau-y += nvif/client.o
+nouveau-y += nvif/device.o
+nouveau-y += nvif/notify.o
+
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
nouveau-y += nouveau_vga.o nouveau_agp.o
nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
nouveau-y += nouveau_prime.o nouveau_abi16.o
+nouveau-y += nouveau_nvif.o nouveau_usif.o
nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
@@ -349,3 +367,6 @@ nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
+
+# platform driver
+obj-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 9079c0ac58e6..68bf06768123 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -26,13 +26,167 @@
#include <core/client.h>
#include <core/handle.h>
#include <core/option.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+
+#include <nvif/event.h>
#include <engine/device.h>
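+
+/* Per-client notification slot: wraps an nvkm_notify and carries the
+ * versioned reply header passed back to the client with each event.
+ */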
+struct nvkm_client_notify {
+ struct nouveau_client *client;
+ struct nvkm_notify n;
+ u8 version;
+ u8 size;
+ union {
+ struct nvif_notify_rep_v0 v0;
+ } rep;
+};
+
+static int
+nvkm_client_notify(struct nvkm_notify *n)
+{
+ struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n);
+ struct nouveau_client *client = notify->client;
+ return client->ntfy(&notify->rep, notify->size, n->data, n->size);
+}
+
+int
+nvkm_client_notify_put(struct nouveau_client *client, int index)
+{
+ if (index < ARRAY_SIZE(client->notify)) {
+ if (client->notify[index]) {
+ nvkm_notify_put(&client->notify[index]->n);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int
+nvkm_client_notify_get(struct nouveau_client *client, int index)
+{
+ if (index < ARRAY_SIZE(client->notify)) {
+ if (client->notify[index]) {
+ nvkm_notify_get(&client->notify[index]->n);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int
+nvkm_client_notify_del(struct nouveau_client *client, int index)
+{
+ if (index < ARRAY_SIZE(client->notify)) {
+ if (client->notify[index]) {
+ nvkm_notify_fini(&client->notify[index]->n);
+ kfree(client->notify[index]);
+ client->notify[index] = NULL;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int
+nvkm_client_notify_new(struct nouveau_client *client,
+ struct nvkm_event *event, void *data, u32 size)
+{
+ struct nvkm_client_notify *notify;
+ union {
+ struct nvif_notify_req_v0 v0;
+ } *req = data;
+ u8 index, reply;
+ int ret;
+
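+ /* find the first free notify slot on this client */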
+ for (index = 0; index < ARRAY_SIZE(client->notify); index++) {
+ if (!client->notify[index])
+ break;
+ }
+
+ if (index == ARRAY_SIZE(client->notify))
+ return -ENOSPC;
+
+ notify = kzalloc(sizeof(*notify), GFP_KERNEL);
+ if (!notify)
+ return -ENOMEM;
+
+ nv_ioctl(client, "notify new size %d\n", size);
+ if (nvif_unpack(req->v0, 0, 0, true)) {
+ nv_ioctl(client, "notify new vers %d reply %d route %02x "
+ "token %llx\n", req->v0.version,
+ req->v0.reply, req->v0.route, req->v0.token);
+ notify->version = req->v0.version;
+ notify->size = sizeof(notify->rep.v0);
+ notify->rep.v0.version = req->v0.version;
+ notify->rep.v0.route = req->v0.route;
+ notify->rep.v0.token = req->v0.token;
+ reply = req->v0.reply;
+ }
+
+ if (ret == 0) {
+ ret = nvkm_notify_init(event, nvkm_client_notify, false,
+ data, size, reply, &notify->n);
+ if (ret == 0) {
+ client->notify[index] = notify;
+ notify->client = client;
+ return index;
+ }
+ }
+
+ kfree(notify);
+ return ret;
+}
+
+static int
+nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size)
+{
+ union {
+ struct nv_client_devlist_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "client devlist size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "client devlist vers %d count %d\n",
+ args->v0.version, args->v0.count);
+ if (size == sizeof(args->v0.device[0]) * args->v0.count) {
+ ret = nouveau_device_list(args->v0.device,
+ args->v0.count);
+ if (ret >= 0) {
+ args->v0.count = ret;
+ ret = 0;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nouveau_client_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NV_CLIENT_DEVLIST:
+ return nouveau_client_devlist(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_client_dtor(struct nouveau_object *object)
{
struct nouveau_client *client = (void *)object;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+ nvkm_client_notify_del(client, i);
nouveau_object_ref(NULL, &client->device);
nouveau_handle_destroy(client->root);
nouveau_namedb_destroy(&client->base);
@@ -42,6 +196,7 @@ static struct nouveau_oclass
nouveau_client_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.dtor = nouveau_client_dtor,
+ .mthd = nouveau_client_mthd,
},
};
@@ -93,9 +248,12 @@ int
nouveau_client_fini(struct nouveau_client *client, bool suspend)
{
const char *name[2] = { "fini", "suspend" };
- int ret;
-
+ int ret, i;
nv_debug(client, "%s running\n", name[suspend]);
+ nv_debug(client, "%s notify\n", name[suspend]);
+ for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+ nvkm_client_notify_put(client, i);
+ nv_debug(client, "%s object\n", name[suspend]);
ret = nouveau_handle_fini(client->root, suspend);
nv_debug(client, "%s completed with %d\n", name[suspend], ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ae81d3b5d8b7..0540a48c5678 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2013 Red Hat Inc.
+ * Copyright 2013-2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,173 +24,77 @@
#include <core/event.h>
void
-nouveau_event_put(struct nouveau_eventh *handler)
+nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
- struct nouveau_event *event = handler->event;
- unsigned long flags;
- u32 m, t;
-
- if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
- return;
-
- spin_lock_irqsave(&event->refs_lock, flags);
- for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
- if (!--event->refs[handler->index * event->types_nr + t]) {
- if (event->disable)
- event->disable(event, 1 << t, handler->index);
+ BUG_ON(!spin_is_locked(&event->refs_lock));
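+ /* drop a ref for each requested type; fini the source on the last put */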
+ while (types) {
+ int type = __ffs(types); types &= ~(1 << type);
+ if (--event->refs[index * event->types_nr + type] == 0) {
+ if (event->func->fini)
+ event->func->fini(event, 1 << type, index);
}
-
}
- spin_unlock_irqrestore(&event->refs_lock, flags);
}
void
-nouveau_event_get(struct nouveau_eventh *handler)
+nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
- struct nouveau_event *event = handler->event;
- unsigned long flags;
- u32 m, t;
-
- if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
- return;
-
- spin_lock_irqsave(&event->refs_lock, flags);
- for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
- if (!event->refs[handler->index * event->types_nr + t]++) {
- if (event->enable)
- event->enable(event, 1 << t, handler->index);
+ BUG_ON(!spin_is_locked(&event->refs_lock));
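+ /* take a ref for each requested type; init the source on the first get */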
+ while (types) {
+ int type = __ffs(types); types &= ~(1 << type);
+ if (++event->refs[index * event->types_nr + type] == 1) {
+ if (event->func->init)
+ event->func->init(event, 1 << type, index);
}
-
}
- spin_unlock_irqrestore(&event->refs_lock, flags);
-}
-
-static void
-nouveau_event_fini(struct nouveau_eventh *handler)
-{
- struct nouveau_event *event = handler->event;
- unsigned long flags;
- nouveau_event_put(handler);
- spin_lock_irqsave(&event->list_lock, flags);
- list_del(&handler->head);
- spin_unlock_irqrestore(&event->list_lock, flags);
-}
-
-static int
-nouveau_event_init(struct nouveau_event *event, u32 types, int index,
- int (*func)(void *, u32, int), void *priv,
- struct nouveau_eventh *handler)
-{
- unsigned long flags;
-
- if (types & ~((1 << event->types_nr) - 1))
- return -EINVAL;
- if (index >= event->index_nr)
- return -EINVAL;
-
- handler->event = event;
- handler->flags = 0;
- handler->types = types;
- handler->index = index;
- handler->func = func;
- handler->priv = priv;
-
- spin_lock_irqsave(&event->list_lock, flags);
- list_add_tail(&handler->head, &event->list[index]);
- spin_unlock_irqrestore(&event->list_lock, flags);
- return 0;
-}
-
-int
-nouveau_event_new(struct nouveau_event *event, u32 types, int index,
- int (*func)(void *, u32, int), void *priv,
- struct nouveau_eventh **phandler)
-{
- struct nouveau_eventh *handler;
- int ret = -ENOMEM;
-
- if (event->check) {
- ret = event->check(event, types, index);
- if (ret)
- return ret;
- }
-
- handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
- if (handler) {
- ret = nouveau_event_init(event, types, index, func, priv, handler);
- if (ret)
- kfree(handler);
- }
-
- return ret;
-}
-
-void
-nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
-{
- BUG_ON(handler != NULL);
- if (*ref) {
- nouveau_event_fini(*ref);
- kfree(*ref);
- }
- *ref = handler;
}
void
-nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
+nvkm_event_send(struct nvkm_event *event, u32 types, int index,
+ void *data, u32 size)
{
- struct nouveau_eventh *handler;
+ struct nvkm_notify *notify;
unsigned long flags;
- if (WARN_ON(index >= event->index_nr))
+ if (!event->refs || WARN_ON(index >= event->index_nr))
return;
spin_lock_irqsave(&event->list_lock, flags);
- list_for_each_entry(handler, &event->list[index], head) {
- if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
- continue;
- if (!(handler->types & types))
- continue;
- if (handler->func(handler->priv, handler->types & types, index)
- != NVKM_EVENT_DROP)
- continue;
- nouveau_event_put(handler);
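+ /* deliver to every notifier registered for this index and type mask */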
+ list_for_each_entry(notify, &event->list, head) {
+ if (notify->index == index && (notify->types & types)) {
+ if (event->func->send) {
+ event->func->send(data, size, notify);
+ continue;
+ }
+ nvkm_notify_send(notify, data, size);
+ }
}
spin_unlock_irqrestore(&event->list_lock, flags);
}
void
-nouveau_event_destroy(struct nouveau_event **pevent)
+nvkm_event_fini(struct nvkm_event *event)
{
- struct nouveau_event *event = *pevent;
- if (event) {
- kfree(event);
- *pevent = NULL;
+ if (event->refs) {
+ kfree(event->refs);
+ event->refs = NULL;
}
}
int
-nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
+nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
+ struct nvkm_event *event)
{
- struct nouveau_event *event;
- int i;
-
- event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
- sizeof(event->refs[0]), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
- if (!event->list) {
- kfree(event);
+ event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr,
+ GFP_KERNEL);
+ if (!event->refs)
return -ENOMEM;
- }
- spin_lock_init(&event->list_lock);
- spin_lock_init(&event->refs_lock);
- for (i = 0; i < index_nr; i++)
- INIT_LIST_HEAD(&event->list[i]);
+ event->func = func;
event->types_nr = types_nr;
event->index_nr = index_nr;
+ spin_lock_init(&event->refs_lock);
+ spin_lock_init(&event->list_lock);
+ INIT_LIST_HEAD(&event->list);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index 264c2b338ac3..a490b805d7e3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -146,9 +146,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
hprintk(handle, TRACE, "created\n");
-
*phandle = handle;
-
return 0;
}
@@ -224,3 +222,116 @@ nouveau_handle_put(struct nouveau_handle *handle)
if (handle)
nouveau_namedb_put(handle);
}
+
+int
+nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle,
+ u16 _oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *engctx = NULL;
+ struct nouveau_object *object = NULL;
+ struct nouveau_object *engine;
+ struct nouveau_oclass *oclass;
+ struct nouveau_handle *handle;
+ int ret;
+
+ /* lookup parent object and ensure it *is* a parent */
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent) {
+ nv_error(client, "parent 0x%08x not found\n", _parent);
+ return -ENOENT;
+ }
+
+ if (!nv_iclass(parent, NV_PARENT_CLASS)) {
+ nv_error(parent, "cannot have children\n");
+ ret = -EINVAL;
+ goto fail_class;
+ }
+
+ /* check that parent supports the requested subclass */
+ ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
+ if (ret) {
+ nv_debug(parent, "illegal class 0x%04x\n", _oclass);
+ goto fail_class;
+ }
+
+ /* make sure engine init has been completed *before* any objects
+ * it controls are created - the constructors may depend on
+ * state calculated at init (ie. default context construction)
+ */
+ if (engine) {
+ ret = nouveau_object_inc(engine);
+ if (ret)
+ goto fail_class;
+ }
+
+ /* if engine requires it, create a context object to insert
+ * between the parent and its children (eg. PGRAPH context)
+ */
+ if (engine && nv_engine(engine)->cclass) {
+ ret = nouveau_object_ctor(parent, engine,
+ nv_engine(engine)->cclass,
+ data, size, &engctx);
+ if (ret)
+ goto fail_engctx;
+ } else {
+ nouveau_object_ref(parent, &engctx);
+ }
+
+ /* finally, create new object and bind it to its handle */
+ ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+ *pobject = object;
+ if (ret)
+ goto fail_ctor;
+
+ ret = nouveau_object_inc(object);
+ if (ret)
+ goto fail_init;
+
+ ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
+ if (ret)
+ goto fail_handle;
+
+ ret = nouveau_handle_init(handle);
+ if (ret)
+ nouveau_handle_destroy(handle);
+
+fail_handle:
+ nouveau_object_dec(object, false);
+fail_init:
+ nouveau_object_ref(NULL, &object);
+fail_ctor:
+ nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+ if (engine)
+ nouveau_object_dec(engine, false);
+fail_class:
+ nouveau_object_ref(NULL, &parent);
+ return ret;
+}
+
+int
+nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *namedb = NULL;
+ struct nouveau_handle *handle = NULL;
+
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent)
+ return -ENOENT;
+
+ namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
+ if (namedb) {
+ handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
+ if (handle) {
+ nouveau_namedb_put(handle);
+ nouveau_handle_fini(handle, false);
+ nouveau_handle_destroy(handle);
+ }
+ }
+
+ nouveau_object_ref(NULL, &parent);
+ return handle ? 0 : -EINVAL;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/ioctl.c b/drivers/gpu/drm/nouveau/core/core/ioctl.c
new file mode 100644
index 000000000000..f7e19bfb489c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ioctl.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/ioctl.h>
+#include <core/event.h>
+
+#include <nvif/unpack.h>
+#include <nvif/ioctl.h>
+
+static int
+nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_nop none;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "nop size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "nop\n");
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_sclass_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (!nv_iclass(object, NV_PARENT_CLASS)) {
+ nv_debug(object, "cannot have children (sclass)\n");
+ return -ENODEV;
+ }
+
+ nv_ioctl(object, "sclass size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "sclass vers %d count %d\n",
+ args->v0.version, args->v0.count);
+ if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
+ ret = nouveau_parent_lclass(object, args->v0.oclass,
+ args->v0.count);
+ if (ret >= 0) {
+ args->v0.count = ret;
+ ret = 0;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
+{
+ union {
+ struct nvif_ioctl_new_v0 v0;
+ } *args = data;
+ struct nouveau_client *client = nouveau_client(parent->object);
+ struct nouveau_object *engctx = NULL;
+ struct nouveau_object *object = NULL;
+ struct nouveau_object *engine;
+ struct nouveau_oclass *oclass;
+ struct nouveau_handle *handle;
+ u32 _handle, _oclass;
+ int ret;
+
+ nv_ioctl(client, "new size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ _handle = args->v0.handle;
+ _oclass = args->v0.oclass;
+ } else
+ return ret;
+
+ nv_ioctl(client, "new vers %d handle %08x class %08x "
+ "route %02x token %llx\n",
+ args->v0.version, _handle, _oclass,
+ args->v0.route, args->v0.token);
+
+ if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
+ nv_debug(parent->object, "cannot have children (ctor)\n");
+ ret = -ENODEV;
+ goto fail_class;
+ }
+
+ /* check that parent supports the requested subclass */
+ ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass);
+ if (ret) {
+ nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
+ goto fail_class;
+ }
+
+ /* make sure engine init has been completed *before* any objects
+ * it controls are created - the constructors may depend on
+ * state calculated at init (ie. default context construction)
+ */
+ if (engine) {
+ ret = nouveau_object_inc(engine);
+ if (ret)
+ goto fail_class;
+ }
+
+ /* if engine requires it, create a context object to insert
+ * between the parent and its children (eg. PGRAPH context)
+ */
+ if (engine && nv_engine(engine)->cclass) {
+ ret = nouveau_object_ctor(parent->object, engine,
+ nv_engine(engine)->cclass,
+ data, size, &engctx);
+ if (ret)
+ goto fail_engctx;
+ } else {
+ nouveau_object_ref(parent->object, &engctx);
+ }
+
+ /* finally, create new object and bind it to its handle */
+ ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+ client->data = object;
+ if (ret)
+ goto fail_ctor;
+
+ ret = nouveau_object_inc(object);
+ if (ret)
+ goto fail_init;
+
+ ret = nouveau_handle_create(parent->object, parent->name,
+ _handle, object, &handle);
+ if (ret)
+ goto fail_handle;
+
+ ret = nouveau_handle_init(handle);
+ handle->route = args->v0.route;
+ handle->token = args->v0.token;
+ if (ret)
+ nouveau_handle_destroy(handle);
+
+fail_handle:
+ nouveau_object_dec(object, false);
+fail_init:
+ nouveau_object_ref(NULL, &object);
+fail_ctor:
+ nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+ if (engine)
+ nouveau_object_dec(engine, false);
+fail_class:
+ return ret;
+}
+
+static int
+nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_del none;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "delete size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "delete\n");
+ nouveau_handle_fini(handle, false);
+ nouveau_handle_destroy(handle);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_mthd_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "mthd vers %d mthd %02x\n",
+ args->v0.version, args->v0.method);
+ if (ret = -ENODEV, ofuncs->mthd)
+ ret = ofuncs->mthd(object, args->v0.method, data, size);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_rd_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "rd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
+ args->v0.version, args->v0.size, args->v0.addr);
+ switch (args->v0.size) {
+ case 1:
+ if (ret = -ENODEV, ofuncs->rd08) {
+ args->v0.data = nv_ro08(object, args->v0.addr);
+ ret = 0;
+ }
+ break;
+ case 2:
+ if (ret = -ENODEV, ofuncs->rd16) {
+ args->v0.data = nv_ro16(object, args->v0.addr);
+ ret = 0;
+ }
+ break;
+ case 4:
+ if (ret = -ENODEV, ofuncs->rd32) {
+ args->v0.data = nv_ro32(object, args->v0.addr);
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_wr_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "wr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
+ args->v0.version, args->v0.size, args->v0.addr,
+ args->v0.data);
+ switch (args->v0.size) {
+ case 1:
+ if (ret = -ENODEV, ofuncs->wr08) {
+ nv_wo08(object, args->v0.addr, args->v0.data);
+ ret = 0;
+ }
+ break;
+ case 2:
+ if (ret = -ENODEV, ofuncs->wr16) {
+ nv_wo16(object, args->v0.addr, args->v0.data);
+ ret = 0;
+ }
+ break;
+ case 4:
+ if (ret = -ENODEV, ofuncs->wr32) {
+ nv_wo32(object, args->v0.addr, args->v0.data);
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_map_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "map size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "map vers %d\n", args->v0.version);
+ if (ret = -ENODEV, ofuncs->map) {
+ ret = ofuncs->map(object, &args->v0.handle,
+ &args->v0.length);
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_unmap none;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "unmap size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "unmap\n");
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_ntfy_new_v0 v0;
+ } *args = data;
+ struct nvkm_event *event;
+ int ret;
+
+ nv_ioctl(object, "ntfy new size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "ntfy new vers %d event %02x\n",
+ args->v0.version, args->v0.event);
+ if (ret = -ENODEV, ofuncs->ntfy)
+ ret = ofuncs->ntfy(object, args->v0.event, &event);
+ if (ret == 0) {
+ ret = nvkm_client_notify_new(client, event, data, size);
+ if (ret >= 0) {
+ args->v0.index = ret;
+ ret = 0;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_ntfy_del_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "ntfy del size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "ntfy del vers %d index %d\n",
+ args->v0.version, args->v0.index);
+ ret = nvkm_client_notify_del(client, args->v0.index);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_ntfy_get_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "ntfy get size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "ntfy get vers %d index %d\n",
+ args->v0.version, args->v0.index);
+ ret = nvkm_client_notify_get(client, args->v0.index);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_ntfy_put_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "ntfy put size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "ntfy put vers %d index %d\n",
+ args->v0.version, args->v0.index);
+ ret = nvkm_client_notify_put(client, args->v0.index);
+ }
+
+ return ret;
+}
+
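+/* dispatch table: the ioctl type value indexes this array directly */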
+static struct {
+ int version;
+ int (*func)(struct nouveau_handle *, void *, u32);
+}
+nvkm_ioctl_v0[] = {
+ { 0x00, nvkm_ioctl_nop },
+ { 0x00, nvkm_ioctl_sclass },
+ { 0x00, nvkm_ioctl_new },
+ { 0x00, nvkm_ioctl_del },
+ { 0x00, nvkm_ioctl_mthd },
+ { 0x00, nvkm_ioctl_rd },
+ { 0x00, nvkm_ioctl_wr },
+ { 0x00, nvkm_ioctl_map },
+ { 0x00, nvkm_ioctl_unmap },
+ { 0x00, nvkm_ioctl_ntfy_new },
+ { 0x00, nvkm_ioctl_ntfy_del },
+ { 0x00, nvkm_ioctl_ntfy_get },
+ { 0x00, nvkm_ioctl_ntfy_put },
+};
+
+static int
+nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr,
+ u32 *path, void *data, u32 size,
+ u8 owner, u8 *route, u64 *token)
+{
+ struct nouveau_handle *handle = parent;
+ struct nouveau_namedb *namedb;
+ struct nouveau_object *object;
+ int ret;
+
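+ /* resolve the handle path, one level per entry, down to the target */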
+ while ((object = parent->object), nr--) {
+ nv_ioctl(object, "path 0x%08x\n", path[nr]);
+ if (!nv_iclass(object, NV_PARENT_CLASS)) {
+ nv_debug(object, "cannot have children (path)\n");
+ return -EINVAL;
+ }
+
+ if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
+ !(handle = nouveau_namedb_get(namedb, path[nr]))) {
+ nv_debug(object, "handle 0x%08x not found\n", path[nr]);
+ return -ENOENT;
+ }
+ nouveau_namedb_put(handle);
+ parent = handle;
+ }
+
+ if (owner != NVIF_IOCTL_V0_OWNER_ANY &&
+ owner != handle->route) {
+ nv_ioctl(object, "object route != owner\n");
+ return -EACCES;
+ }
+ *route = handle->route;
+ *token = handle->token;
+
+ if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
+ if (nvkm_ioctl_v0[type].version == 0) {
+ ret = nvkm_ioctl_v0[type].func(handle, data, size);
+ }
+ }
+
+ return ret;
+}
+
+int
+nvkm_ioctl(struct nouveau_client *client, bool supervisor,
+ void *data, u32 size, void **hack)
+{
+ union {
+ struct nvif_ioctl_v0 v0;
+ } *args = data;
+ int ret;
+
+ client->super = supervisor;
+ nv_ioctl(client, "size %d\n", size);
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
+ args->v0.version, args->v0.type, args->v0.path_nr,
+ args->v0.owner);
+ ret = nvkm_ioctl_path(client->root, args->v0.type,
+ args->v0.path_nr, args->v0.path,
+ data, size, args->v0.owner,
+ &args->v0.route, &args->v0.token);
+ }
+
+ nv_ioctl(client, "return %d\n", ret);
+ if (hack) {
+ *hack = client->data;
+ client->data = NULL;
+ }
+ client->super = false;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
new file mode 100644
index 000000000000..76adb81bdea2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/client.h>
+#include <core/event.h>
+#include <core/notify.h>
+
+#include <nvif/unpack.h>
+#include <nvif/event.h>
+
+static inline void
+nvkm_notify_put_locked(struct nvkm_notify *notify)
+{
+ if (notify->block++ == 0)
+ nvkm_event_put(notify->event, notify->types, notify->index);
+}
+
+void
+nvkm_notify_put(struct nvkm_notify *notify)
+{
+ struct nvkm_event *event = notify->event;
+ unsigned long flags;
+ if (likely(event) &&
+ test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ nvkm_notify_put_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
+ flush_work(&notify->work);
+ }
+}
+
+static inline void
+nvkm_notify_get_locked(struct nvkm_notify *notify)
+{
+ if (--notify->block == 0)
+ nvkm_event_get(notify->event, notify->types, notify->index);
+}
+
+void
+nvkm_notify_get(struct nvkm_notify *notify)
+{
+ struct nvkm_event *event = notify->event;
+ unsigned long flags;
+ if (likely(event) &&
+ !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ nvkm_notify_get_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ }
+}
+
+static inline void
+nvkm_notify_func(struct nvkm_notify *notify)
+{
+ struct nvkm_event *event = notify->event;
+ int ret = notify->func(notify);
+ unsigned long flags;
+ if ((ret == NVKM_NOTIFY_KEEP) ||
+ !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ nvkm_notify_get_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ }
+}
+
+static void
+nvkm_notify_work(struct work_struct *work)
+{
+ struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
+ nvkm_notify_func(notify);
+}
+
+void
+nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
+{
+ struct nvkm_event *event = notify->event;
+ unsigned long flags;
+
+ BUG_ON(!spin_is_locked(&event->list_lock));
+ BUG_ON(size != notify->size);
+
+ spin_lock_irqsave(&event->refs_lock, flags);
+ if (notify->block) {
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ return;
+ }
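+ /* one-shot: disable here; the handler re-arms by returning NVKM_NOTIFY_KEEP */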
+ nvkm_notify_put_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+
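+ /* work-queue delivery needs a private copy; direct calls use the caller's buffer */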
+ if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
+ memcpy((void *)notify->data, data, size);
+ schedule_work(&notify->work);
+ } else {
+ notify->data = data;
+ nvkm_notify_func(notify);
+ notify->data = NULL;
+ }
+}
+
+void
+nvkm_notify_fini(struct nvkm_notify *notify)
+{
+ unsigned long flags;
+ if (notify->event) {
+ nvkm_notify_put(notify);
+ spin_lock_irqsave(&notify->event->list_lock, flags);
+ list_del(&notify->head);
+ spin_unlock_irqrestore(&notify->event->list_lock, flags);
+ kfree((void *)notify->data);
+ notify->event = NULL;
+ }
+}
+
+int
+nvkm_notify_init(struct nvkm_event *event, int (*func)(struct nvkm_notify *),
+ bool work, void *data, u32 size, u32 reply,
+ struct nvkm_notify *notify)
+{
+ unsigned long flags;
+ int ret = -ENODEV;
+ if ((notify->event = event), event->refs) {
+ ret = event->func->ctor(data, size, notify);
+ if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
+ notify->flags = 0;
+ notify->block = 1;
+ notify->func = func;
+ notify->data = NULL;
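+ /* work-mode handlers run asynchronously and need their own reply buffer */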
+ if (ret = 0, work) {
+ INIT_WORK(&notify->work, nvkm_notify_work);
+ set_bit(NVKM_NOTIFY_WORK, &notify->flags);
+ notify->data = kmalloc(reply, GFP_KERNEL);
+ if (!notify->data)
+ ret = -ENOMEM;
+ }
+ }
+ if (ret == 0) {
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_add_tail(&notify->head, &event->list);
+ spin_unlock_irqrestore(&event->list_lock, flags);
+ }
+ }
+ if (ret)
+ notify->event = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 124538555904..b08630577c82 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -23,9 +23,6 @@
*/
#include <core/object.h>
-#include <core/parent.h>
-#include <core/namedb.h>
-#include <core/handle.h>
#include <core/engine.h>
#ifdef NOUVEAU_OBJECT_MAGIC
@@ -61,21 +58,15 @@ nouveau_object_create_(struct nouveau_object *parent,
return 0;
}
-static int
+int
_nouveau_object_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_object *object;
- int ret;
-
- ret = nouveau_object_create(parent, engine, oclass, 0, &object);
- *pobject = nv_object(object);
- if (ret)
- return ret;
-
- return 0;
+ if (size != 0)
+ return -ENOSYS;
+ return nouveau_object_create(parent, engine, oclass, 0, pobject);
}
void
@@ -91,42 +82,24 @@ nouveau_object_destroy(struct nouveau_object *object)
kfree(object);
}
-static void
-_nouveau_object_dtor(struct nouveau_object *object)
-{
- nouveau_object_destroy(object);
-}
-
int
nouveau_object_init(struct nouveau_object *object)
{
return 0;
}
-static int
-_nouveau_object_init(struct nouveau_object *object)
-{
- return nouveau_object_init(object);
-}
-
int
nouveau_object_fini(struct nouveau_object *object, bool suspend)
{
return 0;
}
-static int
-_nouveau_object_fini(struct nouveau_object *object, bool suspend)
-{
- return nouveau_object_fini(object, suspend);
-}
-
struct nouveau_ofuncs
nouveau_object_ofuncs = {
.ctor = _nouveau_object_ctor,
- .dtor = _nouveau_object_dtor,
- .init = _nouveau_object_init,
- .fini = _nouveau_object_fini,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
};
int
@@ -189,119 +162,6 @@ nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
}
int
-nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
- u16 _oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *engctx = NULL;
- struct nouveau_object *object = NULL;
- struct nouveau_object *engine;
- struct nouveau_oclass *oclass;
- struct nouveau_handle *handle;
- int ret;
-
- /* lookup parent object and ensure it *is* a parent */
- parent = nouveau_handle_ref(client, _parent);
- if (!parent) {
- nv_error(client, "parent 0x%08x not found\n", _parent);
- return -ENOENT;
- }
-
- if (!nv_iclass(parent, NV_PARENT_CLASS)) {
- nv_error(parent, "cannot have children\n");
- ret = -EINVAL;
- goto fail_class;
- }
-
- /* check that parent supports the requested subclass */
- ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
- if (ret) {
- nv_debug(parent, "illegal class 0x%04x\n", _oclass);
- goto fail_class;
- }
-
- /* make sure engine init has been completed *before* any objects
- * it controls are created - the constructors may depend on
- * state calculated at init (ie. default context construction)
- */
- if (engine) {
- ret = nouveau_object_inc(engine);
- if (ret)
- goto fail_class;
- }
-
- /* if engine requires it, create a context object to insert
- * between the parent and its children (eg. PGRAPH context)
- */
- if (engine && nv_engine(engine)->cclass) {
- ret = nouveau_object_ctor(parent, engine,
- nv_engine(engine)->cclass,
- data, size, &engctx);
- if (ret)
- goto fail_engctx;
- } else {
- nouveau_object_ref(parent, &engctx);
- }
-
- /* finally, create new object and bind it to its handle */
- ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
- *pobject = object;
- if (ret)
- goto fail_ctor;
-
- ret = nouveau_object_inc(object);
- if (ret)
- goto fail_init;
-
- ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
- if (ret)
- goto fail_handle;
-
- ret = nouveau_handle_init(handle);
- if (ret)
- nouveau_handle_destroy(handle);
-
-fail_handle:
- nouveau_object_dec(object, false);
-fail_init:
- nouveau_object_ref(NULL, &object);
-fail_ctor:
- nouveau_object_ref(NULL, &engctx);
-fail_engctx:
- if (engine)
- nouveau_object_dec(engine, false);
-fail_class:
- nouveau_object_ref(NULL, &parent);
- return ret;
-}
-
-int
-nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *namedb = NULL;
- struct nouveau_handle *handle = NULL;
-
- parent = nouveau_handle_ref(client, _parent);
- if (!parent)
- return -ENOENT;
-
- namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
- if (namedb) {
- handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
- if (handle) {
- nouveau_namedb_put(handle);
- nouveau_handle_fini(handle, false);
- nouveau_handle_destroy(handle);
- }
- }
-
- nouveau_object_ref(NULL, &parent);
- return handle ? 0 : -EINVAL;
-}
-
-int
nouveau_object_inc(struct nouveau_object *object)
{
int ref = atomic_add_return(1, &object->usecount);
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index dee5d1235e9b..8701968a9743 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -75,6 +75,39 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
}
int
+nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
+{
+ struct nouveau_sclass *sclass;
+ struct nouveau_engine *engine;
+ struct nouveau_oclass *oclass;
+ int nr = -1, i;
+ u64 mask;
+
+ sclass = nv_parent(parent)->sclass;
+ while (sclass) {
+ if (++nr < size)
+ lclass[nr] = sclass->oclass->handle;
+ sclass = sclass->sclass;
+ }
+
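+ /* append the classes exposed by each engine the parent can see */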
+ mask = nv_parent(parent)->engine;
+ while (i = __ffs64(mask), mask) {
+ engine = nouveau_engine(parent, i);
+ if (engine && (oclass = engine->sclass)) {
+ while (oclass->ofuncs) {
+ if (++nr < size)
+ lclass[nr] = oclass->handle;
+ oclass++;
+ }
+ }
+
+ mask &= ~(1ULL << i);
+ }
+
+ return nr + 1;
+}
+
+int
nouveau_parent_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, u32 pclass,
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index f31527733e00..abb410ef09ea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -30,7 +30,6 @@
#include <subdev/vm.h>
#include <core/client.h>
-#include <core/class.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index ac3291f781f6..9261694d0d35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -26,9 +26,7 @@
#include <engine/fifo.h>
#include <engine/copy.h>
-#include <core/class.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/enum.h>
#include "fuc/nvc0.fuc.h"
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 748a61eb3c6f..c7194b354605 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -24,7 +24,6 @@
#include <core/os.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <engine/copy.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 2551dafbec73..ea5c42f31791 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -25,7 +25,6 @@
#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index c7082377ec76..5571c09534cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -25,7 +25,6 @@
#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/timer.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.c b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c
new file mode 100644
index 000000000000..4dbf0ba89e5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "acpi.h"
+
+#ifdef CONFIG_ACPI
+static int
+nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct nouveau_device *device =
+ container_of(nb, typeof(*device), acpi.nb);
+ struct acpi_bus_event *info = data;
+
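+ /* let listeners know the AC adapter state changed */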
+ if (!strcmp(info->device_class, "ac_adapter"))
+ nvkm_event_send(&device->event, 1, 0, NULL, 0);
+
+ return NOTIFY_DONE;
+}
+#endif
+
+int
+nvkm_acpi_fini(struct nouveau_device *device, bool suspend)
+{
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&device->acpi.nb);
+#endif
+ return 0;
+}
+
+int
+nvkm_acpi_init(struct nouveau_device *device)
+{
+#ifdef CONFIG_ACPI
+ device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
+ register_acpi_notifier(&device->acpi.nb);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.h b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
new file mode 100644
index 000000000000..cc49f4f568cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
@@ -0,0 +1,9 @@
+#ifndef __NVKM_DEVICE_ACPI_H__
+#define __NVKM_DEVICE_ACPI_H__
+
+#include <engine/device.h>
+
+int nvkm_acpi_init(struct nouveau_device *);
+int nvkm_acpi_fini(struct nouveau_device *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 18c8c7245b73..8928f7981d4a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -26,10 +26,14 @@
#include <core/device.h>
#include <core/client.h>
#include <core/option.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
-#include <core/class.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
#include "priv.h"
+#include "acpi.h"
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
@@ -49,74 +53,258 @@ nouveau_device_find(u64 name)
return match;
}
+int
+nouveau_device_list(u64 *name, int size)
+{
+ struct nouveau_device *device;
+ int nr = 0;
+ mutex_lock(&nv_devices_mutex);
+ list_for_each_entry(device, &nv_devices, head) {
+ if (nr++ < size)
+ name[nr - 1] = device->handle;
+ }
+ mutex_unlock(&nv_devices_mutex);
+ return nr;
+}
+
/******************************************************************************
* nouveau_devobj (0x0080): class implementation
*****************************************************************************/
+
struct nouveau_devobj {
struct nouveau_parent base;
struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
};
+static int
+nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nouveau_instmem *imem = nouveau_instmem(device);
+ union {
+ struct nv_device_info_v0 v0;
+ } *args = data;
+ int ret;
+
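+ /* nvif_unpack() validates the versioned payload and records its status in ret */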
+ nv_ioctl(object, "device info size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "device info vers %d\n", args->v0.version);
+ } else
+ return ret;
+
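+ /* report the bus type: known IGP chipsets first, otherwise probe for
+ * AGP/PCIe/PCI capabilities, or SoC when there's no PCI device at all
+ */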
+ switch (device->chipset) {
+ case 0x01a:
+ case 0x01f:
+ case 0x04c:
+ case 0x04e:
+ case 0x063:
+ case 0x067:
+ case 0x068:
+ case 0x0aa:
+ case 0x0ac:
+ case 0x0af:
+ args->v0.platform = NV_DEVICE_INFO_V0_IGP;
+ break;
+ default:
+ if (device->pdev) {
+ if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
+ args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+ else
+ if (pci_is_pcie(device->pdev))
+ args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+ else
+ args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+ } else {
+ args->v0.platform = NV_DEVICE_INFO_V0_SOC;
+ }
+ break;
+ }
+
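+ /* map the internal card_type onto the userspace family enumeration */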
+ switch (device->card_type) {
+ case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
+ case NV_10:
+ case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
+ case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
+ case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
+ case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
+ case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
+ case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
+ case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
+ case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+ default:
+ args->v0.family = 0;
+ break;
+ }
+
+ args->v0.chipset = device->chipset;
+ args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
+ if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
+ else args->v0.ram_size = args->v0.ram_user = 0;
+ if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
+ return 0;
+}
+
+static int
+nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NV_DEVICE_V0_INFO:
+ return nouveau_devobj_info(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static u8
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd08(object->engine, addr);
+}
+
+static u16
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd16(object->engine, addr);
+}
+
+static u32
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd32(object->engine, addr);
+}
+
+static void
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
+{
+ nv_wr08(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
+{
+ nv_wr16(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ nv_wr32(object->engine, addr, data);
+}
+
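+/* let clients map the device's register aperture (resource/BAR 0) */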
+static int
+nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size)
+{
+ struct nouveau_device *device = nv_device(object);
+ *addr = nv_device_resource_start(device, 0);
+ *size = nv_device_resource_len(device, 0);
+ return 0;
+}
+
static const u64 disable_map[] = {
- [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
- [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_PWR] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
- [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
- [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
- [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
- [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
- [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP,
- [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT,
- [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP,
- [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
- [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
- [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
- [NVDEV_ENGINE_VIC] = NV_DEVICE_DISABLE_VIC,
- [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
- [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
+ [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_V0_DISABLE_VBIOS,
+ [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_TIMER] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_FB] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_VM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
+ [NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO,
+ [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GRAPH,
+ [NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG,
+ [NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME,
+ [NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP,
+ [NVDEV_ENGINE_CRYPT] = NV_DEVICE_V0_DISABLE_CRYPT,
+ [NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP,
+ [NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
+ [NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0,
+ [NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1,
+ [NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
+ [NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
+ [NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
[NVDEV_SUBDEV_NR] = 0,
};
+static void
+nouveau_devobj_dtor(struct nouveau_object *object)
+{
+ struct nouveau_devobj *devobj = (void *)object;
+ int i;
+
+ for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
+ nouveau_object_ref(NULL, &devobj->subdev[i]);
+
+ nouveau_parent_destroy(&devobj->base);
+}
+
+static struct nouveau_oclass
+nouveau_devobj_oclass_super = {
+ .handle = NV_DEVICE,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_devobj_dtor,
+ .init = _nouveau_parent_init,
+ .fini = _nouveau_parent_fini,
+ .mthd = nouveau_devobj_mthd,
+ .map = nouveau_devobj_map,
+ .rd08 = nouveau_devobj_rd08,
+ .rd16 = nouveau_devobj_rd16,
+ .rd32 = nouveau_devobj_rd32,
+ .wr08 = nouveau_devobj_wr08,
+ .wr16 = nouveau_devobj_wr16,
+ .wr32 = nouveau_devobj_wr32,
+ }
+};
+
static int
nouveau_devobj_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv_device_v0 v0;
+ } *args = data;
struct nouveau_client *client = nv_client(parent);
struct nouveau_device *device;
struct nouveau_devobj *devobj;
- struct nv_device_class *args = data;
u32 boot0, strap;
u64 disable, mmio_base, mmio_size;
void __iomem *map;
int ret, i, c;
- if (size < sizeof(struct nv_device_class))
- return -EINVAL;
+ nv_ioctl(parent, "create device size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create device v%d device %016llx "
+ "disable %016llx debug0 %016llx\n",
+ args->v0.version, args->v0.device,
+ args->v0.disable, args->v0.debug0);
+ } else
+ return ret;
+
+ /* give privileged clients register access */
+ if (client->super)
+ oclass = &nouveau_devobj_oclass_super;
/* find the device subdev that matches what the client requested */
device = nv_device(client->device);
- if (args->device != ~0) {
- device = nouveau_device_find(args->device);
+ if (args->v0.device != ~0) {
+ device = nouveau_device_find(args->v0.device);
if (!device)
return -ENODEV;
}
@@ -135,14 +323,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
mmio_size = nv_device_resource_len(device, 0);
/* translate api disable mask into internal mapping */
- disable = args->debug0;
+ disable = args->v0.debug0;
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
- if (args->disable & disable_map[i])
+ if (args->v0.disable & disable_map[i])
disable |= (1ULL << i);
}
/* identify the chipset, and determine classes of subdev/engines */
- if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
+ if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
!device->card_type) {
map = ioremap(mmio_base, 0x102000);
if (map == NULL)
@@ -180,8 +368,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case 0x080:
case 0x090:
case 0x0a0: device->card_type = NV_50; break;
- case 0x0c0: device->card_type = NV_C0; break;
- case 0x0d0: device->card_type = NV_D0; break;
+ case 0x0c0:
+ case 0x0d0: device->card_type = NV_C0; break;
case 0x0e0:
case 0x0f0:
case 0x100: device->card_type = NV_E0; break;
@@ -206,8 +394,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case NV_30: ret = nv30_identify(device); break;
case NV_40: ret = nv40_identify(device); break;
case NV_50: ret = nv50_identify(device); break;
- case NV_C0:
- case NV_D0: ret = nvc0_identify(device); break;
+ case NV_C0: ret = nvc0_identify(device); break;
case NV_E0: ret = nve0_identify(device); break;
case GM100: ret = gm100_identify(device); break;
default:
@@ -242,7 +429,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
}
- if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
+ if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
!nv_subdev(device)->mmio) {
nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
if (!nv_subdev(device)->mmio) {
@@ -298,71 +485,19 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
return 0;
}
-static void
-nouveau_devobj_dtor(struct nouveau_object *object)
-{
- struct nouveau_devobj *devobj = (void *)object;
- int i;
-
- for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
- nouveau_object_ref(NULL, &devobj->subdev[i]);
-
- nouveau_parent_destroy(&devobj->base);
-}
-
-static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
-{
- return nv_rd08(object->engine, addr);
-}
-
-static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
-{
- return nv_rd16(object->engine, addr);
-}
-
-static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
-{
- return nv_rd32(object->engine, addr);
-}
-
-static void
-nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
-{
- nv_wr08(object->engine, addr, data);
-}
-
-static void
-nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
-{
- nv_wr16(object->engine, addr, data);
-}
-
-static void
-nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- nv_wr32(object->engine, addr, data);
-}
-
static struct nouveau_ofuncs
nouveau_devobj_ofuncs = {
.ctor = nouveau_devobj_ctor,
.dtor = nouveau_devobj_dtor,
.init = _nouveau_parent_init,
.fini = _nouveau_parent_fini,
- .rd08 = nouveau_devobj_rd08,
- .rd16 = nouveau_devobj_rd16,
- .rd32 = nouveau_devobj_rd32,
- .wr08 = nouveau_devobj_wr08,
- .wr16 = nouveau_devobj_wr16,
- .wr32 = nouveau_devobj_wr32,
+ .mthd = nouveau_devobj_mthd,
};
/******************************************************************************
* nouveau_device: engine functions
*****************************************************************************/
+
static struct nouveau_oclass
nouveau_device_sclass[] = {
{ 0x0080, &nouveau_devobj_ofuncs },
@@ -370,6 +505,23 @@ nouveau_device_sclass[] = {
};
static int
+nouveau_device_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
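+ /* the AC adapter event carries no payload; reject any sized request */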
+ if (!WARN_ON(size != 0)) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static const struct nvkm_event_func
+nouveau_device_event_func = {
+ .ctor = nouveau_device_event_ctor,
+};
+
+static int
nouveau_device_fini(struct nouveau_object *object, bool suspend)
{
struct nouveau_device *device = (void *)object;
@@ -386,7 +538,7 @@ nouveau_device_fini(struct nouveau_object *object, bool suspend)
}
}
- ret = 0;
+ ret = nvkm_acpi_fini(device, suspend);
fail:
for (; ret && i < NVDEV_SUBDEV_NR; i++) {
if ((subdev = device->subdev[i])) {
@@ -407,7 +559,11 @@ nouveau_device_init(struct nouveau_object *object)
{
struct nouveau_device *device = (void *)object;
struct nouveau_object *subdev;
- int ret, i;
+ int ret, i = 0;
+
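+ /* hook up ACPI notifications before bringing up the subdevs */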
+ ret = nvkm_acpi_init(device);
+ if (ret)
+ goto fail;
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if ((subdev = device->subdev[i])) {
@@ -430,6 +586,8 @@ fail:
}
}
+ if (ret)
+ nvkm_acpi_fini(device, false);
return ret;
}
@@ -438,6 +596,8 @@ nouveau_device_dtor(struct nouveau_object *object)
{
struct nouveau_device *device = (void *)object;
+ nvkm_event_fini(&device->event);
+
mutex_lock(&nv_devices_mutex);
list_del(&device->head);
mutex_unlock(&nv_devices_mutex);
@@ -478,31 +638,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
}
}
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page)
-{
- dma_addr_t ret;
-
- if (nv_device_is_pci(device)) {
- ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, ret))
- ret = 0;
- } else {
- ret = page_to_phys(page);
- }
-
- return ret;
-}
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
-{
- if (nv_device_is_pci(device))
- pci_unmap_page(device->pdev, addr, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
-}
-
int
nv_device_get_irq(struct nouveau_device *device, bool stall)
{
@@ -560,6 +695,9 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
nv_engine(device)->sclass = nouveau_device_sclass;
list_add(&device->head, &nv_devices);
+
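+ /* event source with a single type and index, used for AC adapter changes */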
+ ret = nvkm_event_init(&nouveau_device_event_func, 1, 1,
+ &device->event);
done:
mutex_unlock(&nv_devices_mutex);
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
index 4b69bf56ed01..e34101a3490e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -22,55 +22,82 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include <core/client.h>
#include <core/object.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
#include <subdev/clock.h>
#include "priv.h"
static int
-nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_control_mthd_pstate_info(struct nouveau_object *object,
+ void *data, u32 size)
{
+ union {
+ struct nvif_control_pstate_info_v0 v0;
+ } *args = data;
struct nouveau_clock *clk = nouveau_clock(object);
- struct nv_control_pstate_info *args = data;
+ int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(object, "control pstate info size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "control pstate info vers %d\n",
+ args->v0.version);
+ } else
+ return ret;
if (clk) {
- args->count = clk->state_nr;
- args->ustate = clk->ustate;
- args->pstate = clk->pstate;
+ args->v0.count = clk->state_nr;
+ args->v0.ustate_ac = clk->ustate_ac;
+ args->v0.ustate_dc = clk->ustate_dc;
+ args->v0.pwrsrc = clk->pwrsrc;
+ args->v0.pstate = clk->pstate;
} else {
- args->count = 0;
- args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
- args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
+ args->v0.count = 0;
+ args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
+ args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
+ args->v0.pwrsrc = -ENOSYS;
+ args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
return 0;
}
static int
-nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_control_mthd_pstate_attr(struct nouveau_object *object,
+ void *data, u32 size)
{
+ union {
+ struct nvif_control_pstate_attr_v0 v0;
+ } *args = data;
struct nouveau_clock *clk = nouveau_clock(object);
- struct nv_control_pstate_attr *args = data;
struct nouveau_clocks *domain;
struct nouveau_pstate *pstate;
struct nouveau_cstate *cstate;
int i = 0, j = -1;
u32 lo, hi;
-
- if ((size < sizeof(*args)) || !clk ||
- (args->state >= 0 && args->state >= clk->state_nr))
- return -EINVAL;
+ int ret;
+
+ nv_ioctl(object, "control pstate attr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "control pstate attr vers %d state %d "
+ "index %d\n",
+ args->v0.version, args->v0.state, args->v0.index);
+ if (!clk)
+ return -ENODEV;
+ if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
+ return -EINVAL;
+ if (args->v0.state >= clk->state_nr)
+ return -EINVAL;
+ } else
+ return ret;
domain = clk->domains;
while (domain->name != nv_clk_src_max) {
- if (domain->mname && ++j == args->index)
+ if (domain->mname && ++j == args->v0.index)
break;
domain++;
}
@@ -78,9 +105,9 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
if (domain->name == nv_clk_src_max)
return -EINVAL;
- if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
+ if (args->v0.state != NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) {
list_for_each_entry(pstate, &clk->states, head) {
- if (i++ == args->state)
+ if (i++ == args->v0.state)
break;
}
@@ -91,21 +118,21 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
hi = max(hi, cstate->domain[domain->name]);
}
- args->state = pstate->pstate;
+ args->v0.state = pstate->pstate;
} else {
lo = max(clk->read(clk, domain->name), 0);
hi = lo;
}
- snprintf(args->name, sizeof(args->name), "%s", domain->mname);
- snprintf(args->unit, sizeof(args->unit), "MHz");
- args->min = lo / domain->mdiv;
- args->max = hi / domain->mdiv;
+ snprintf(args->v0.name, sizeof(args->v0.name), "%s", domain->mname);
+ snprintf(args->v0.unit, sizeof(args->v0.unit), "MHz");
+ args->v0.min = lo / domain->mdiv;
+ args->v0.max = hi / domain->mdiv;
- args->index = 0;
+ args->v0.index = 0;
while ((++domain)->name != nv_clk_src_max) {
if (domain->mname) {
- args->index = ++j;
+ args->v0.index = ++j;
break;
}
}
@@ -114,31 +141,65 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
}
static int
-nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_control_mthd_pstate_user(struct nouveau_object *object,
+ void *data, u32 size)
{
+ union {
+ struct nvif_control_pstate_user_v0 v0;
+ } *args = data;
struct nouveau_clock *clk = nouveau_clock(object);
- struct nv_control_pstate_user *args = data;
+ int ret;
+
+ nv_ioctl(object, "control pstate user size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "control pstate user vers %d ustate %d "
+ "pwrsrc %d\n", args->v0.version,
+ args->v0.ustate, args->v0.pwrsrc);
+ if (!clk)
+ return -ENODEV;
+ } else
+ return ret;
+
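+ /* set the user pstate for one power source, or for both AC and DC
+ * when no specific power source (pwrsrc < 0) was requested
+ */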
+ if (args->v0.pwrsrc >= 0) {
+ ret |= nouveau_clock_ustate(clk, args->v0.ustate, args->v0.pwrsrc);
+ } else {
+ ret |= nouveau_clock_ustate(clk, args->v0.ustate, 0);
+ ret |= nouveau_clock_ustate(clk, args->v0.ustate, 1);
+ }
- if (size < sizeof(*args) || !clk)
- return -EINVAL;
+ return ret;
+}
- return nouveau_clock_ustate(clk, args->state);
+static int
+nouveau_control_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NVIF_CONTROL_PSTATE_INFO:
+ return nouveau_control_mthd_pstate_info(object, data, size);
+ case NVIF_CONTROL_PSTATE_ATTR:
+ return nouveau_control_mthd_pstate_attr(object, data, size);
+ case NVIF_CONTROL_PSTATE_USER:
+ return nouveau_control_mthd_pstate_user(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
}
+static struct nouveau_ofuncs
+nouveau_control_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ .mthd = nouveau_control_mthd,
+};
+
struct nouveau_oclass
nouveau_control_oclass[] = {
- { .handle = NV_CONTROL_CLASS,
- .ofuncs = &nouveau_object_ofuncs,
- .omthds = (struct nouveau_omthds[]) {
- { NV_CONTROL_PSTATE_INFO,
- NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
- { NV_CONTROL_PSTATE_ATTR,
- NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
- { NV_CONTROL_PSTATE_USER,
- NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
- {},
- },
+ { .handle = NVIF_IOCTL_NEW_V0_CONTROL,
+ .ofuncs = &nouveau_control_ofuncs
},
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index a520029e25d9..377ec0b8851e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -33,7 +33,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/ibus.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
@@ -68,20 +68,20 @@ gm100_identify(struct nouveau_device *device)
#endif
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
#if 0
- device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 40b29d0214cb..573b55f5c2f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -56,7 +56,7 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
@@ -74,7 +74,7 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 5f7c25ff523d..183a85a6204e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -58,7 +58,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
@@ -75,7 +75,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -94,7 +94,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -113,7 +113,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -132,7 +132,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -151,7 +151,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -170,7 +170,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -189,7 +189,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 75fed11bba0a..aa564c68a920 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -59,7 +59,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
@@ -78,7 +78,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
@@ -97,7 +97,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
@@ -116,7 +116,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 36919d7db7cc..11bd31da82ab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -59,7 +59,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
@@ -78,7 +78,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
@@ -97,7 +97,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
@@ -117,7 +117,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
@@ -137,7 +137,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1130a62be2c7..e96c223cb797 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -65,7 +65,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -88,7 +88,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -111,7 +111,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -134,7 +134,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -157,7 +157,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -180,7 +180,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -203,7 +203,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -226,7 +226,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -249,7 +249,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -272,7 +272,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -295,7 +295,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -318,7 +318,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -341,7 +341,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -364,7 +364,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -387,7 +387,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -410,7 +410,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ef0b0bde1a91..932f84fae459 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -74,7 +74,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -99,7 +99,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -127,7 +127,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -155,7 +155,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -183,7 +183,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -211,7 +211,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -267,7 +267,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -295,7 +295,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -323,7 +323,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -350,9 +350,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -380,9 +380,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -409,9 +409,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -438,9 +438,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d55ed633b19..b4a2917ce555 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -33,7 +33,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/ibus.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
@@ -70,14 +70,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass;
@@ -102,14 +102,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -134,14 +134,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -165,14 +165,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -197,14 +197,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -229,14 +229,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass;
@@ -260,14 +260,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass;
@@ -292,14 +292,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
@@ -323,12 +323,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 2d1e97d4264f..cdf9147f32a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -33,7 +33,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/ibus.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
@@ -70,14 +70,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -103,14 +103,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -136,14 +136,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -158,15 +158,17 @@ nve0_identify(struct nouveau_device *device)
break;
case 0xea:
device->cname = "GK20A";
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
@@ -186,14 +188,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
@@ -219,17 +221,17 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk110b_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
@@ -248,18 +250,18 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
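The repeated change through these device tables, dropping the address-of operator on the PWR and DMAOBJ entries and renaming LTCG to LTC, reflects subdevs whose oclass symbol is now exported as a ready-made pointer instead of a struct to take the address of. A minimal standalone C sketch of the two declaration styles (all names below are illustrative stand-ins, not the kernel's own definitions):

	struct nouveau_oclass { unsigned handle; };

	/* old style: the struct itself is exported, so every table
	 * assignment reads "&nv40_volt_oclass" */
	struct nouveau_oclass nv40_volt_oclass = { 0x40 };

	/* new style: an implementation wrapper exports a pointer, so
	 * the '&' disappears at every assignment site */
	static struct nouveau_oclass nvc0_pwr_impl = { 0xc0 };
	struct nouveau_oclass *nvc0_pwr_oclass = &nvc0_pwr_impl;

	static struct nouveau_oclass *table[2];

	static void identify_sketch(void)
	{
		table[0] = &nv40_volt_oclass;	/* address-of still needed */
		table[1] = nvc0_pwr_oclass;	/* already a pointer */
	}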
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index 9c38c5e40500..22d55f6cde50 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -22,23 +22,93 @@
* Authors: Ben Skeggs
*/
+#include <core/os.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/event.h>
+
#include "priv.h"
#include "outp.h"
#include "conn.h"
+int
+nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ struct nouveau_disp *disp =
+ container_of(notify->event, typeof(*disp), vblank);
+ union {
+ struct nvif_notify_head_req_v0 v0;
+ } *req = data;
+ int ret;
+
+ if (nvif_unpack(req->v0, 0, 0, false)) {
+ notify->size = sizeof(struct nvif_notify_head_rep_v0);
+ if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
+ notify->types = 1;
+ notify->index = req->v0.head;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+void
+nouveau_disp_vblank(struct nouveau_disp *disp, int head)
+{
+ struct nvif_notify_head_rep_v0 rep = {};
+ nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
+}
+
static int
-nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index)
+nouveau_disp_hpd_ctor(void *data, u32 size, struct nvkm_notify *notify)
{
- struct nouveau_disp *disp = event->priv;
+ struct nouveau_disp *disp =
+ container_of(notify->event, typeof(*disp), hpd);
+ union {
+ struct nvif_notify_conn_req_v0 v0;
+ } *req = data;
struct nvkm_output *outp;
- list_for_each_entry(outp, &disp->outp, head) {
- if (outp->conn->index == index) {
- if (outp->conn->hpd.event)
- return 0;
- break;
+ int ret;
+
+ if (nvif_unpack(req->v0, 0, 0, false)) {
+ notify->size = sizeof(struct nvif_notify_conn_rep_v0);
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
+ if (ret = -ENODEV, outp->conn->hpd.event) {
+ notify->types = req->v0.mask;
+ notify->index = req->v0.conn;
+ ret = 0;
+ }
+ break;
+ }
}
}
- return -ENOSYS;
+
+ return ret;
+}
+
+static const struct nvkm_event_func
+nouveau_disp_hpd_func = {
+ .ctor = nouveau_disp_hpd_ctor
+};
+
+int
+nouveau_disp_ntfy(struct nouveau_object *object, u32 type,
+ struct nvkm_event **event)
+{
+ struct nouveau_disp *disp = (void *)object->engine;
+ switch (type) {
+ case NV04_DISP_NTFY_VBLANK:
+ *event = &disp->vblank;
+ return 0;
+ case NV04_DISP_NTFY_CONN:
+ *event = &disp->hpd;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
}
int
@@ -97,7 +167,8 @@ _nouveau_disp_dtor(struct nouveau_object *object)
struct nouveau_disp *disp = (void *)object;
struct nvkm_output *outp, *outt;
- nouveau_event_destroy(&disp->vblank);
+ nvkm_event_fini(&disp->vblank);
+ nvkm_event_fini(&disp->hpd);
if (disp->outp.next) {
list_for_each_entry_safe(outp, outt, &disp->outp, head) {
@@ -157,14 +228,11 @@ nouveau_disp_create_(struct nouveau_object *parent,
hpd = max(hpd, (u8)(dcbE.connector + 1));
}
- ret = nouveau_event_create(3, hpd, &disp->hpd);
+ ret = nvkm_event_init(&nouveau_disp_hpd_func, 3, hpd, &disp->hpd);
if (ret)
return ret;
- disp->hpd->priv = disp;
- disp->hpd->check = nouveau_disp_hpd_check;
-
- ret = nouveau_event_create(1, heads, &disp->vblank);
+ ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);
if (ret)
return ret;
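The notifier model introduced in this file splits event handling in two: a per-event "ctor" validates a versioned request and records routing data (types mask, index, reply size) in the notify object, and producers later call nvkm_event_send() with a reply structure. A compilable sketch of that ctor contract, using simplified stand-in types rather than the real nvkm/nvif definitions:

	#include <stddef.h>

	struct notify_sketch { int types; int index; size_t size; };
	struct head_req_v0 { unsigned char version; unsigned char head; };
	struct head_rep_v0 { unsigned char version; };

	enum { HEADS_NR = 2 };

	/* validate the request; on success the notify knows which index
	 * to listen on and how large the reply payload will be */
	static int vblank_ctor_sketch(const void *data, unsigned size,
				      struct notify_sketch *notify)
	{
		const struct head_req_v0 *req = data;
		if (size < sizeof(*req) || req->head >= HEADS_NR)
			return -6;		/* -ENXIO */
		notify->size  = sizeof(struct head_rep_v0);
		notify->types = 1;		/* vblank has one type bit */
		notify->index = req->head;	/* this head's events only */
		return 0;
	}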
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
index 4ffbc70ecf5a..3d1070228977 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
@@ -22,39 +22,41 @@
* Authors: Ben Skeggs
*/
+#include <core/os.h>
+#include <nvif/event.h>
+
#include <subdev/gpio.h>
#include "conn.h"
#include "outp.h"
-static void
-nvkm_connector_hpd_work(struct work_struct *w)
+static int
+nvkm_connector_hpd(struct nvkm_notify *notify)
{
- struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work);
+ struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
struct nouveau_disp *disp = nouveau_disp(conn);
struct nouveau_gpio *gpio = nouveau_gpio(conn);
- u32 send = NVKM_HPD_UNPLUG;
- if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index))
- send = NVKM_HPD_PLUG;
- nouveau_event_trigger(disp->hpd, send, conn->index);
- nouveau_event_get(conn->hpd.event);
-}
+ const struct nvkm_gpio_ntfy_rep *line = notify->data;
+ struct nvif_notify_conn_rep_v0 rep;
+ int index = conn->index;
-static int
-nvkm_connector_hpd(void *data, u32 type, int index)
-{
- struct nvkm_connector *conn = data;
- DBG("HPD: %d\n", type);
- schedule_work(&conn->hpd.work);
- return NVKM_EVENT_DROP;
+ DBG("HPD: %d\n", line->mask);
+
+ if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
+ rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
+ else
+ rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
+ rep.version = 0;
+
+ nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+ return NVKM_NOTIFY_KEEP;
}
int
_nvkm_connector_fini(struct nouveau_object *object, bool suspend)
{
struct nvkm_connector *conn = (void *)object;
- if (conn->hpd.event)
- nouveau_event_put(conn->hpd.event);
+ nvkm_notify_put(&conn->hpd);
return nouveau_object_fini(&conn->base, suspend);
}
@@ -63,10 +65,8 @@ _nvkm_connector_init(struct nouveau_object *object)
{
struct nvkm_connector *conn = (void *)object;
int ret = nouveau_object_init(&conn->base);
- if (ret == 0) {
- if (conn->hpd.event)
- nouveau_event_get(conn->hpd.event);
- }
+ if (ret == 0)
+ nvkm_notify_get(&conn->hpd);
return ret;
}
@@ -74,7 +74,7 @@ void
_nvkm_connector_dtor(struct nouveau_object *object)
{
struct nvkm_connector *conn = (void *)object;
- nouveau_event_ref(NULL, &conn->hpd.event);
+ nvkm_notify_fini(&conn->hpd);
nouveau_object_destroy(&conn->base);
}
@@ -116,19 +116,24 @@ nvkm_connector_create_(struct nouveau_object *parent,
if ((info->hpd = ffs(info->hpd))) {
if (--info->hpd >= ARRAY_SIZE(hpd)) {
ERR("hpd %02x unknown\n", info->hpd);
- goto done;
+ return 0;
}
info->hpd = hpd[info->hpd];
ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
if (ret) {
ERR("func %02x lookup failed, %d\n", info->hpd, ret);
- goto done;
+ return 0;
}
- ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED,
- func.line, nvkm_connector_hpd,
- conn, &conn->hpd.event);
+ ret = nvkm_notify_init(&gpio->event, nvkm_connector_hpd, true,
+ &(struct nvkm_gpio_ntfy_req) {
+ .mask = NVKM_GPIO_TOGGLED,
+ .line = func.line,
+ },
+ sizeof(struct nvkm_gpio_ntfy_req),
+ sizeof(struct nvkm_gpio_ntfy_rep),
+ &conn->hpd);
if (ret) {
ERR("func %02x failed, %d\n", info->hpd, ret);
} else {
@@ -136,8 +141,6 @@ nvkm_connector_create_(struct nouveau_object *parent,
}
}
-done:
- INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
index 035ebeacbb1c..55e5f5c82c14 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
@@ -10,10 +10,7 @@ struct nvkm_connector {
struct nvbios_connE info;
int index;
- struct {
- struct nouveau_eventh *event;
- struct work_struct work;
- } hpd;
+ struct nvkm_notify hpd;
};
#define nvkm_connector_create(p,e,c,b,i,d) \
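With the work_struct removed from nvkm_connector (see the conn.h hunk just above), the hpd handler runs directly from the notify machinery and signals whether to stay armed by its return value. A reduced sketch of that handler shape; the NVKM_NOTIFY_KEEP/DROP semantics are from the hunks above, the types and names are invented for illustration:

	enum { NOTIFY_DROP = 0, NOTIFY_KEEP = 1 };

	struct gpio_rep_sketch { int mask; };	/* producer's reply payload */

	/* the handler receives the reply data directly and re-arms
	 * itself by returning KEEP: no work_struct bounce, no manual
	 * event get() after each delivery */
	static int hpd_handler_sketch(const struct gpio_rep_sketch *line)
	{
		int plug_mask = (line->mask & 1) ? 1 : 2;
		(void)plug_mask;  /* forwarded via nvkm_event_send() upstream */
		return NOTIFY_KEEP;
	}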
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index a66b27c0fcab..b36addff06a9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -32,13 +33,28 @@
#include "nv50.h"
int
-nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+nv50_dac_power(NV50_DISP_MTHD_V1)
{
- const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
- (data & NV50_DISP_DAC_PWR_VSYNC) |
- (data & NV50_DISP_DAC_PWR_DATA) |
- (data & NV50_DISP_DAC_PWR_STATE);
- const u32 doff = (or * 0x800);
+ const u32 doff = outp->or * 0x800;
+ union {
+ struct nv50_disp_dac_pwr_v0 v0;
+ } *args = data;
+ u32 stat;
+ int ret;
+
+ nv_ioctl(object, "disp dac pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp dac pwr vers %d state %d data %d "
+ "vsync %d hsync %d\n",
+ args->v0.version, args->v0.state, args->v0.data,
+ args->v0.vsync, args->v0.hsync);
+ stat = 0x00000040 * !args->v0.state;
+ stat |= 0x00000010 * !args->v0.data;
+ stat |= 0x00000004 * !args->v0.vsync;
+ stat |= 0x00000001 * !args->v0.hsync;
+ } else
+ return ret;
+
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
@@ -46,9 +62,24 @@ nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
}
int
-nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+nv50_dac_sense(NV50_DISP_MTHD_V1)
{
- const u32 doff = (or * 0x800);
+ union {
+ struct nv50_disp_dac_load_v0 v0;
+ } *args = data;
+ const u32 doff = outp->or * 0x800;
+ u32 loadval;
+ int ret;
+
+ nv_ioctl(object, "disp dac load size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp dac load vers %d data %08x\n",
+ args->v0.version, args->v0.data);
+ if (args->v0.data & 0xfff00000)
+ return -EINVAL;
+ loadval = args->v0.data;
+ } else
+ return ret;
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
@@ -61,38 +92,10 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
- nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
+ nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval);
if (!(loadval & 0x80000000))
return -ETIMEDOUT;
- return (loadval & 0x38000000) >> 27;
-}
-
-int
-nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
- u32 *data = args;
- int ret;
-
- if (size < sizeof(u32))
- return -EINVAL;
-
- switch (mthd & ~0x3f) {
- case NV50_DISP_DAC_PWR:
- ret = priv->dac.power(priv, or, data[0]);
- break;
- case NV50_DISP_DAC_LOAD:
- ret = priv->dac.sense(priv, or, data[0]);
- if (ret >= 0) {
- data[0] = ret;
- ret = 0;
- }
- break;
- default:
- BUG_ON(1);
- }
-
- return ret;
+ args->v0.load = (loadval & 0x38000000) >> 27;
+ return 0;
}
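Both methods above follow the same "unpack or bail" idiom: nvif_unpack is a macro that checks the payload size and version window and assigns ret on failure, which is why the else branch can return ret with no visible assignment. A plain-function approximation of that shape (this is not the real macro, which also handles trailing data and version consumption):

	struct pwr_v0_sketch { unsigned char version; unsigned char state; };

	/* returns 1 when the payload matched the version window;
	 * otherwise stores an errno-style code in *ret, mirroring the
	 * macro's side effect */
	static int unpack_v0_sketch(const void *data, unsigned size,
				    unsigned vers_lo, unsigned vers_hi,
				    int *ret)
	{
		const struct pwr_v0_sketch *args = data;
		if (size < sizeof(*args) ||
		    args->version < vers_lo || args->version > vers_hi) {
			*ret = -38;	/* -ENOSYS: no matching version */
			return 0;
		}
		*ret = 0;
		return 1;
	}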
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 5a5b59b21130..39890221b91c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -30,7 +30,7 @@
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "dport.h"
#include "outpdp.h"
@@ -335,7 +335,7 @@ nouveau_dp_train(struct work_struct *w)
int ret;
/* bring capabilities within encoder limits */
- if (nv_mclass(disp) < NVD0_DISP_CLASS)
+ if (nv_mclass(disp) < GF110_DISP)
outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -354,7 +354,7 @@ nouveau_dp_train(struct work_struct *w)
cfg--;
/* disable link interrupt handling during link training */
- nouveau_event_put(outp->irq);
+ nvkm_notify_put(&outp->irq);
/* enable down-spreading and execute pre-train script from vbios */
dp_link_train_init(dp, outp->dpcd[3] & 0x01);
@@ -395,5 +395,5 @@ nouveau_dp_train(struct work_struct *w)
DBG("training complete\n");
atomic_set(&outp->lt.done, 1);
wake_up(&outp->lt.wait);
- nouveau_event_get(outp->irq);
+ nvkm_notify_get(&outp->irq);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index 9fc7447fec90..d54da8b5f87e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -35,17 +35,17 @@
static struct nouveau_oclass
gm107_disp_sclass[] = {
- { GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
gm107_disp_base_oclass[] = {
- { GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GM107_DISP, &nvd0_disp_base_ofuncs },
{}
};
@@ -93,9 +93,11 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index a19e7d79b847..8b4e06abe533 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -22,25 +22,37 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+nva3_hda_eld(NV50_DISP_MTHD_V1)
{
- const u32 soff = (or * 0x800);
- int i;
+ union {
+ struct nv50_disp_sor_hda_eld_v0 v0;
+ } *args = data;
+ const u32 soff = outp->or * 0x800;
+ int ret, i;
- if (data && data[0]) {
+ nv_ioctl(object, "disp sor hda eld size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+ if (size > 0x60)
+ return -E2BIG;
+ } else
+ return ret;
+
+ if (size && args->v0.data[0]) {
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nv_wr32(priv, 0x61c440 + soff, (i << 8));
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
} else
- if (data) {
+ if (size) {
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
} else {
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index 717639386ced..baf558fc12fb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -33,19 +34,30 @@
#include "nv50.h"
int
-nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+nvd0_hda_eld(NV50_DISP_MTHD_V1)
{
- const u32 soff = (or * 0x030);
- int i;
+ union {
+ struct nv50_disp_sor_hda_eld_v0 v0;
+ } *args = data;
+ const u32 soff = outp->or * 0x030;
+ int ret, i;
- if (data && data[0]) {
+ nv_ioctl(object, "disp sor hda eld size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+ if (size > 0x60)
+ return -E2BIG;
+ } else
+ return ret;
+
+ if (size && args->v0.data[0]) {
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nv_wr32(priv, 0x10ec00 + soff, (i << 8));
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
} else
- if (data) {
+ if (size) {
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
} else {
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
index 7fdade6e604d..fa276dede9cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -22,17 +22,38 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+nv84_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 hoff = (head * 0x800);
+ union {
+ struct nv50_disp_sor_hdmi_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl;
+ int ret;
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
+ if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+ return -EINVAL;
+ ctrl = 0x40000000 * !!args->v0.state;
+ ctrl |= args->v0.max_ac_packet << 16;
+ ctrl |= args->v0.rekey;
+ ctrl |= 0x1f000000; /* ??? */
+ } else
+ return ret;
+
+ if (!(ctrl & 0x40000000)) {
nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
@@ -65,6 +86,6 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
- nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
return 0;
}
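The rewritten HDMI control paths stop trusting a caller-supplied raw register word and instead rebuild it from validated fields, using multiplication by a 0/1 boolean to set a whole bit group. A tiny self-contained illustration of that construction (masks taken from the hunk above, the function name and masking are invented; the real code rejects out-of-range fields with -EINVAL instead of masking):

	typedef unsigned int u32;

	static u32 hdmi_ctrl_sketch(int state, u32 max_ac_packet, u32 rekey)
	{
		u32 ctrl = 0x40000000 * !!state;	/* enable bit iff state */
		ctrl |= (max_ac_packet & 0x1f) << 16;
		ctrl |= rekey & 0x7f;
		return ctrl;
	}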
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
index db8c6fd46278..57eeed1d1942 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -22,17 +22,38 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+nva3_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
- const u32 soff = (or * 0x800);
+ const u32 soff = outp->or * 0x800;
+ union {
+ struct nv50_disp_sor_hdmi_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl;
+ int ret;
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
+ if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+ return -EINVAL;
+ ctrl = 0x40000000 * !!args->v0.state;
+ ctrl |= args->v0.max_ac_packet << 16;
+ ctrl |= args->v0.rekey;
+ ctrl |= 0x1f000000; /* ??? */
+ } else
+ return ret;
+
+ if (!(ctrl & 0x40000000)) {
nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
@@ -65,6 +86,6 @@ nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
- nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
index 5151bb261832..3106d295b48d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -22,17 +22,37 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 hoff = (head * 0x800);
+ union {
+ struct nv50_disp_sor_hdmi_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl;
+ int ret;
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
+ if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+ return -EINVAL;
+ ctrl = 0x40000000 * !!args->v0.state;
+ ctrl |= args->v0.max_ac_packet << 16;
+ ctrl |= args->v0.rekey;
+ } else
+ return ret;
+
+ if (!(ctrl & 0x40000000)) {
nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
@@ -54,7 +74,7 @@ nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
/* HDMI_CTRL */
- nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+ nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
/* NFI, audio doesn't work without it though.. */
nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index a32666ed0c47..366f315fc9a5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -24,60 +24,100 @@
#include "priv.h"
+#include <core/client.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
struct nv04_disp_priv {
struct nouveau_disp base;
};
static int
-nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nv04_disp_scanoutpos(struct nouveau_object *object, struct nv04_disp_priv *priv,
+ void *data, u32 size, int head)
{
- struct nv04_disp_priv *priv = (void *)object->engine;
- struct nv04_display_scanoutpos *args = data;
- const int head = (mthd & NV04_DISP_MTHD_HEAD);
+ const u32 hoff = head * 0x2000;
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
u32 line;
+ int ret;
+
+ nv_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
+ args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
+ args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
+ args->v0.vblanke = args->v0.vtotal - 1;
+
+ args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
+ args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
+ args->v0.hblanke = args->v0.htotal - 1;
+
+ /*
+ * If output is vga instead of digital then vtotal/htotal is
+ * invalid so we have to give up and trigger the timestamping
+ * fallback in the drm core.
+ */
+ if (!args->v0.vtotal || !args->v0.htotal)
+ return -ENOTSUPP;
+
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ line = nv_rd32(priv, 0x600868 + hoff);
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline = (line & 0xffff0000) >> 16;
+ args->v0.vline = (line & 0x0000ffff);
+ } else
+ return ret;
- if (size < sizeof(*args))
- return -EINVAL;
-
- args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
- args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
- args->vblanke = args->vtotal - 1;
-
- args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
- args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
- args->hblanke = args->htotal - 1;
-
- /*
- * If output is vga instead of digital then vtotal/htotal is invalid
- * so we have to give up and trigger the timestamping fallback in the
- * drm core.
- */
- if (!args->vtotal || !args->htotal)
- return -ENOTSUPP;
-
- args->time[0] = ktime_to_ns(ktime_get());
- line = nv_rd32(priv, 0x600868 + (head * 0x2000));
- args->time[1] = ktime_to_ns(ktime_get());
- args->hline = (line & 0xffff0000) >> 16;
- args->vline = (line & 0x0000ffff);
return 0;
}
-#define HEAD_MTHD(n) (n), (n) + 0x01
+static int
+nv04_disp_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
+{
+ union {
+ struct nv04_disp_mthd_v0 v0;
+ } *args = data;
+ struct nv04_disp_priv *priv = (void *)object->engine;
+ int head, ret;
+
+ nv_ioctl(object, "disp mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+ args->v0.version, args->v0.method, args->v0.head);
+ mthd = args->v0.method;
+ head = args->v0.head;
+ } else
+ return ret;
-static struct nouveau_omthds
-nv04_disp_omthds[] = {
- { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos },
- {}
+ if (head < 0 || head >= 2)
+ return -ENXIO;
+
+ switch (mthd) {
+ case NV04_DISP_SCANOUTPOS:
+ return nv04_disp_scanoutpos(object, priv, data, size, head);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static struct nouveau_ofuncs
+nv04_disp_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ .mthd = nv04_disp_mthd,
+ .ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
nv04_disp_sclass[] = {
- { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds },
+ { NV04_DISP, &nv04_disp_ofuncs },
{},
};
@@ -86,17 +126,26 @@ nv04_disp_sclass[] = {
******************************************************************************/
static void
-nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head)
+nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
- nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001);
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
}
static void
-nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head)
+nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
- nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000);
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
}
+static const struct nvkm_event_func
+nv04_disp_vblank_func = {
+ .ctor = nouveau_disp_vblank_ctor,
+ .init = nv04_disp_vblank_init,
+ .fini = nv04_disp_vblank_fini,
+};
+
static void
nv04_disp_intr(struct nouveau_subdev *subdev)
{
@@ -106,12 +155,12 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
u32 pvideo;
if (crtc0 & 0x00000001) {
- nouveau_event_trigger(priv->base.vblank, 1, 0);
+ nouveau_disp_vblank(&priv->base, 0);
nv_wr32(priv, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nouveau_event_trigger(priv->base.vblank, 1, 1);
+ nouveau_disp_vblank(&priv->base, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
@@ -140,9 +189,6 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv04_disp_sclass;
nv_subdev(priv)->intr = nv04_disp_intr;
- priv->base.vblank->priv = priv;
- priv->base.vblank->enable = nv04_disp_vblank_enable;
- priv->base.vblank->disable = nv04_disp_vblank_disable;
return 0;
}
@@ -155,4 +201,5 @@ nv04_disp_oclass = &(struct nouveau_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .vblank = &nv04_disp_vblank_func,
}.base;
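Note how the nv04 code above recovers its context: the old enable/disable callbacks read event->priv, while the new init/fini hooks derive the containing disp from the event pointer itself, since the event is embedded in the disp. A standalone sketch of that container_of step (types invented for illustration):

	#include <stddef.h>

	#define container_of_sketch(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct event_sketch { int refs; };
	struct disp_sketch {
		int regs;
		struct event_sketch vblank;	/* embedded event */
	};

	static struct disp_sketch *disp_from_event(struct event_sketch *ev)
	{
		/* no ->priv backpointer needed: the event is embedded,
		 * so pointer arithmetic recovers the parent */
		return container_of_sketch(ev, struct disp_sketch, vblank);
	}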
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 2283c442a10d..4b5bb5d58a54 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -23,10 +23,12 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/parent.h>
#include <core/handle.h>
-#include <core/class.h>
#include <core/enum.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -43,14 +45,16 @@
* EVO channel base class
******************************************************************************/
-int
+static int
nv50_disp_chan_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int chid,
+ struct nouveau_oclass *oclass, int head,
int length, void **pobject)
{
+ const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
struct nv50_disp_base *base = (void *)parent;
struct nv50_disp_chan *chan;
+ int chid = impl->chid + head;
int ret;
if (base->chan & (1 << chid))
@@ -63,12 +67,14 @@ nv50_disp_chan_create_(struct nouveau_object *parent,
chan = *pobject;
if (ret)
return ret;
-
chan->chid = chid;
+
+ nv_parent(chan)->object_attach = impl->attach;
+ nv_parent(chan)->object_detach = impl->detach;
return 0;
}
-void
+static void
nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
{
struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
@@ -76,6 +82,16 @@ nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
nouveau_namedb_destroy(&chan->base);
}
+int
+nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size)
+{
+ struct nv50_disp_chan *chan = (void *)object;
+ *addr = nv_device_resource_start(nv_device(object), 0) +
+ 0x640000 + (chan->chid * 0x1000);
+ *size = 0x001000;
+ return 0;
+}
+
u32
nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
{
@@ -115,16 +131,16 @@ nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
nouveau_ramht_remove(base->ramht, cookie);
}
-int
+static int
nv50_disp_dmac_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+ struct nouveau_oclass *oclass, u32 pushbuf, int head,
int length, void **pobject)
{
struct nv50_disp_dmac *dmac;
int ret;
- ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+ ret = nv50_disp_chan_create_(parent, engine, oclass, head,
length, pobject);
dmac = *pobject;
if (ret)
@@ -397,27 +413,32 @@ nv50_disp_mast_mthd_chan = {
}
};
-static int
+int
nv50_disp_mast_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_mast_class *args = data;
+ union {
+ struct nv50_disp_core_channel_dma_v0 v0;
+ } *args = data;
struct nv50_disp_dmac *mast;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create disp core channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp core channel dma vers %d "
+ "pushbuf %08x\n",
+ args->v0.version, args->v0.pushbuf);
+ } else
+ return ret;
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
0, sizeof(*mast), (void **)&mast);
*pobject = nv_object(mast);
if (ret)
return ret;
- nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
- nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
return 0;
}
@@ -479,14 +500,18 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
return nv50_disp_chan_fini(&mast->base, suspend);
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_mast_ofuncs = {
- .ctor = nv50_disp_mast_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nv50_disp_mast_init,
- .fini = nv50_disp_mast_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_mast_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nv50_disp_mast_init,
+ .base.fini = nv50_disp_mast_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 0,
+ .attach = nv50_disp_dmac_object_attach,
+ .detach = nv50_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -543,39 +568,51 @@ nv50_disp_sync_mthd_chan = {
}
};
-static int
+int
nv50_disp_sync_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_sync_class *args = data;
+ union {
+ struct nv50_disp_base_channel_dma_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp base channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp base channel dma vers %d "
+ "pushbuf %08x head %d\n",
+ args->v0.version, args->v0.pushbuf, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 1 + args->head, sizeof(*dmac),
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
+ args->v0.head, sizeof(*dmac),
(void **)&dmac);
*pobject = nv_object(dmac);
if (ret)
return ret;
- nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_sync_ofuncs = {
- .ctor = nv50_disp_sync_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nv50_disp_dmac_init,
- .fini = nv50_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_sync_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nv50_disp_dmac_init,
+ .base.fini = nv50_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 1,
+ .attach = nv50_disp_dmac_object_attach,
+ .detach = nv50_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -620,39 +657,51 @@ nv50_disp_ovly_mthd_chan = {
}
};
-static int
+int
nv50_disp_ovly_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_ovly_class *args = data;
+ union {
+ struct nv50_disp_overlay_channel_dma_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp overlay channel dma vers %d "
+ "pushbuf %08x head %d\n",
+ args->v0.version, args->v0.pushbuf, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 3 + args->head, sizeof(*dmac),
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
+ args->v0.head, sizeof(*dmac),
(void **)&dmac);
*pobject = nv_object(dmac);
if (ret)
return ret;
- nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_ovly_ofuncs = {
- .ctor = nv50_disp_ovly_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nv50_disp_dmac_init,
- .fini = nv50_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_ovly_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nv50_disp_dmac_init,
+ .base.fini = nv50_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 3,
+ .attach = nv50_disp_dmac_object_attach,
+ .detach = nv50_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -662,14 +711,14 @@ nv50_disp_ovly_ofuncs = {
static int
nv50_disp_pioc_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int chid,
+ struct nouveau_oclass *oclass, int head,
int length, void **pobject)
{
- return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ return nv50_disp_chan_create_(parent, engine, oclass, head,
length, pobject);
}
-static void
+void
nv50_disp_pioc_dtor(struct nouveau_object *object)
{
struct nv50_disp_pioc *pioc = (void *)object;
@@ -727,20 +776,29 @@ nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
* EVO immediate overlay channel objects
******************************************************************************/
-static int
+int
nv50_disp_oimm_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_oimm_class *args = data;
+ union {
+ struct nv50_disp_overlay_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_pioc *pioc;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp overlay size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp overlay vers %d head %d\n",
+ args->v0.version, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
sizeof(*pioc), (void **)&pioc);
*pobject = nv_object(pioc);
if (ret)
@@ -749,34 +807,45 @@ nv50_disp_oimm_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_oimm_ofuncs = {
- .ctor = nv50_disp_oimm_ctor,
- .dtor = nv50_disp_pioc_dtor,
- .init = nv50_disp_pioc_init,
- .fini = nv50_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_oimm_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nv50_disp_pioc_init,
+ .base.fini = nv50_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 5,
};
/*******************************************************************************
* EVO cursor channel objects
******************************************************************************/
-static int
+int
nv50_disp_curs_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_curs_class *args = data;
+ union {
+ struct nv50_disp_cursor_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_pioc *pioc;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp cursor size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp cursor vers %d head %d\n",
+ args->v0.version, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
sizeof(*pioc), (void **)&pioc);
*pobject = nv_object(pioc);
if (ret)
@@ -785,14 +854,16 @@ nv50_disp_curs_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_curs_ofuncs = {
- .ctor = nv50_disp_curs_ctor,
- .dtor = nv50_disp_pioc_dtor,
- .init = nv50_disp_pioc_init,
- .fini = nv50_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_curs_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nv50_disp_pioc_init,
+ .base.fini = nv50_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 7,
};
/*******************************************************************************
@@ -800,47 +871,162 @@ nv50_disp_curs_ofuncs = {
******************************************************************************/
int
-nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv04_display_scanoutpos *args = data;
- const int head = (mthd & NV50_DISP_MTHD_HEAD);
- u32 blanke, blanks, total;
+ const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
+ const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
+ const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540));
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
+ args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+ args->v0.hblanke = (blanke & 0x0000ffff);
+ args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+ args->v0.hblanks = (blanks & 0x0000ffff);
+ args->v0.vtotal = ( total & 0xffff0000) >> 16;
+ args->v0.htotal = ( total & 0x0000ffff);
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ args->v0.vline = /* vline read locks hline */
+ nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline =
+ nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ } else
+ return ret;
- if (size < sizeof(*args) || head >= priv->head.nr)
- return -EINVAL;
- blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
- blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
- total = nv_rd32(priv, 0x610afc + (head * 0x540));
-
- args->vblanke = (blanke & 0xffff0000) >> 16;
- args->hblanke = (blanke & 0x0000ffff);
- args->vblanks = (blanks & 0xffff0000) >> 16;
- args->hblanks = (blanks & 0x0000ffff);
- args->vtotal = ( total & 0xffff0000) >> 16;
- args->htotal = ( total & 0x0000ffff);
-
- args->time[0] = ktime_to_ns(ktime_get());
- args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
- args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
- args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
return 0;
}
-static void
-nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
+int
+nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
{
- nv_mask(event->priv, 0x61002c, (4 << head), (4 << head));
-}
+ const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
+ union {
+ struct nv50_disp_mthd_v0 v0;
+ struct nv50_disp_mthd_v1 v1;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nvkm_output *outp = NULL;
+ struct nvkm_output *temp;
+ u16 type, mask = 0;
+ int head, ret;
-static void
-nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
-{
- nv_mask(event->priv, 0x61002c, (4 << head), 0);
+ if (mthd != NV50_DISP_MTHD)
+ return -EINVAL;
+
+ nv_ioctl(object, "disp mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+ args->v0.version, args->v0.method, args->v0.head);
+ mthd = args->v0.method;
+ head = args->v0.head;
+ } else
+ if (nvif_unpack(args->v1, 1, 1, true)) {
+ nv_ioctl(object, "disp mthd vers %d mthd %02x "
+ "type %04x mask %04x\n",
+ args->v1.version, args->v1.method,
+ args->v1.hasht, args->v1.hashm);
+ mthd = args->v1.method;
+ type = args->v1.hasht;
+ mask = args->v1.hashm;
+ head = ffs((mask >> 8) & 0x0f) - 1;
+ } else
+ return ret;
+
+ if (head < 0 || head >= priv->head.nr)
+ return -ENXIO;
+
+ if (mask) {
+ list_for_each_entry(temp, &priv->base.outp, head) {
+ if ((temp->info.hasht == type) &&
+ (temp->info.hashm & mask) == mask) {
+ outp = temp;
+ break;
+ }
+ }
+ if (outp == NULL)
+ return -ENXIO;
+ }
+
+ switch (mthd) {
+ case NV50_DISP_SCANOUTPOS:
+ return impl->head.scanoutpos(object, priv, data, size, head);
+ default:
+ break;
+ }
+
+ switch (mthd * !!outp) {
+ case NV50_DISP_MTHD_V1_DAC_PWR:
+ return priv->dac.power(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_DAC_LOAD:
+ return priv->dac.sense(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_PWR:
+ return priv->sor.power(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
+ if (!priv->sor.hda_eld)
+ return -ENODEV;
+ return priv->sor.hda_eld(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
+ if (!priv->sor.hdmi)
+ return -ENODEV;
+ return priv->sor.hdmi(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
+ union {
+ struct nv50_disp_sor_lvds_script_v0 v0;
+ } *args = data;
+ nv_ioctl(object, "disp sor lvds script size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor lvds script "
+ "vers %d name %04x\n",
+ args->v0.version, args->v0.script);
+ priv->sor.lvdsconf = args->v0.script;
+ return 0;
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
+ struct nvkm_output_dp *outpdp = (void *)outp;
+ union {
+ struct nv50_disp_sor_dp_pwr_v0 v0;
+ } *args = data;
+ nv_ioctl(object, "disp sor dp pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ if (args->v0.state == 0) {
+ nvkm_notify_put(&outpdp->irq);
+ ((struct nvkm_output_dp_impl *)nv_oclass(outp))
+ ->lnk_pwr(outpdp, 0);
+ atomic_set(&outpdp->lt.done, 0);
+ return 0;
+ } else
+ if (args->v0.state != 0) {
+ nvkm_output_dp_train(&outpdp->base, 0, true);
+ return 0;
+ }
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_PIOR_PWR:
+ if (!priv->pior.power)
+ return -ENODEV;
+ return priv->pior.power(object, priv, data, size, head, outp);
+ default:
+ break;
+ }
+
+ return -EINVAL;
}
-static int
+int
nv50_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -856,14 +1042,11 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- priv->base.vblank->priv = priv;
- priv->base.vblank->enable = nv50_disp_base_vblank_enable;
- priv->base.vblank->disable = nv50_disp_base_vblank_disable;
return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
&base->ramht);
}
-static void
+void
nv50_disp_base_dtor(struct nouveau_object *object)
{
struct nv50_disp_base *base = (void *)object;
@@ -958,34 +1141,23 @@ nv50_disp_base_ofuncs = {
.dtor = nv50_disp_base_dtor,
.init = nv50_disp_base_init,
.fini = nv50_disp_base_fini,
-};
-
-static struct nouveau_omthds
-nv50_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
+ .mthd = nv50_disp_base_mthd,
+ .ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
nv50_disp_base_oclass[] = {
- { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+ { NV50_DISP, &nv50_disp_base_ofuncs },
{}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
@@ -1005,7 +1177,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
int ret = -EBUSY;
/* no context needed for channel objects... */
- if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+ if (nv_mclass(parent) != NV_DEVICE) {
atomic_inc(&parent->refcount);
*pobject = parent;
return 1;
@@ -1040,6 +1212,27 @@ nv50_disp_cclass = {
* Display engine implementation
******************************************************************************/
+static void
+nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x61002c, (4 << head), 0);
+}
+
+static void
+nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x61002c, (4 << head), (4 << head));
+}
+
+const struct nvkm_event_func
+nv50_disp_vblank_func = {
+ .ctor = nouveau_disp_vblank_ctor,
+ .init = nv50_disp_vblank_init,
+ .fini = nv50_disp_vblank_fini,
+};
+
static const struct nouveau_enum
nv50_disp_intr_error_type[] = {
{ 3, "ILLEGAL_MTHD" },
@@ -1381,7 +1574,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
int TU, VTUi, VTUf, VTUa;
u64 link_data_rate, link_ratio, unk;
u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, bits, r;
+ u32 link_nr, link_bw, bits;
/* calculate packed data rate for each lane */
if (dpctrl > 0x00030000) link_nr = 4;
@@ -1401,7 +1594,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
/* calculate ratio of packed data rate to link symbol rate */
link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
+ do_div(link_ratio, link_bw);
for (TU = 64; TU >= 32; TU--) {
/* calculate average number of valid symbols in each TU */
@@ -1462,8 +1655,8 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
/* XXX close to vbios numbers, but not right */
unk = (symbol - link_ratio) * bestTU;
unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
+ do_div(unk, symbol);
+ do_div(unk, symbol);
unk += 6;
nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
@@ -1654,13 +1847,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
if (intr1 & 0x00000004) {
- nouveau_event_trigger(priv->base.vblank, 1, 0);
+ nouveau_disp_vblank(&priv->base, 0);
nv_wr32(priv, 0x610024, 0x00000004);
intr1 &= ~0x00000004;
}
if (intr1 & 0x00000008) {
- nouveau_event_trigger(priv->base.vblank, 1, 1);
+ nouveau_disp_vblank(&priv->base, 1);
nv_wr32(priv, 0x610024, 0x00000008);
intr1 &= ~0x00000008;
}
@@ -1718,9 +1911,11 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv50_disp_mast_mthd_chan,
.mthd.base = &nv50_disp_sync_mthd_chan,
.mthd.ovly = &nv50_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
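
A note on the scanoutpos decode used above: each timing register packs the vertical count into its high 16 bits and the horizontal count into the low 16 bits, and the two ktime_get() calls bracket the vline read because reading vline latches hline. A minimal user-space sketch of the register decode follows; the sample register value is made up, not read from hardware.

    #include <stdint.h>
    #include <stdio.h>

    /* Split one packed timing register into its 16-bit v/h halves,
     * mirroring the (reg & 0xffff0000) >> 16 / (reg & 0x0000ffff)
     * arithmetic used by the scanoutpos implementations. */
    static void unpack_timing(uint32_t reg, uint16_t *v, uint16_t *h)
    {
            *v = (reg & 0xffff0000) >> 16;
            *h = (reg & 0x0000ffff);
    }

    int main(void)
    {
            uint32_t total = (1125u << 16) | 2200u; /* sample 1080p totals */
            uint16_t vtotal, htotal;

            unpack_timing(total, &vtotal, &htotal);
            printf("vtotal %u htotal %u\n", vtotal, htotal);
            return 0;
    }
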
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 1a886472b6f5..8ab14461f70c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -14,15 +14,10 @@
#include "outp.h"
#include "outpdp.h"
-struct nv50_disp_impl {
- struct nouveau_disp_impl base;
- struct {
- const struct nv50_disp_mthd_chan *core;
- const struct nv50_disp_mthd_chan *base;
- const struct nv50_disp_mthd_chan *ovly;
- int prev;
- } mthd;
-};
+#define NV50_DISP_MTHD_ struct nouveau_object *object, \
+ struct nv50_disp_priv *priv, void *data, u32 size
+#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
+#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
struct nv50_disp_priv {
struct nouveau_disp base;
@@ -36,44 +31,52 @@ struct nv50_disp_priv {
} head;
struct {
int nr;
- int (*power)(struct nv50_disp_priv *, int dac, u32 data);
- int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*sense)(NV50_DISP_MTHD_V1);
} dac;
struct {
int nr;
- int (*power)(struct nv50_disp_priv *, int sor, u32 data);
- int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
- int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*hda_eld)(NV50_DISP_MTHD_V1);
+ int (*hdmi)(NV50_DISP_MTHD_V1);
u32 lvdsconf;
} sor;
struct {
int nr;
- int (*power)(struct nv50_disp_priv *, int ext, u32 data);
+ int (*power)(NV50_DISP_MTHD_V1);
u8 type[3];
} pior;
};
-#define HEAD_MTHD(n) (n), (n) + 0x03
-
-int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32);
+struct nv50_disp_impl {
+ struct nouveau_disp_impl base;
+ struct {
+ const struct nv50_disp_mthd_chan *core;
+ const struct nv50_disp_mthd_chan *base;
+ const struct nv50_disp_mthd_chan *ovly;
+ int prev;
+ } mthd;
+ struct {
+ int (*scanoutpos)(NV50_DISP_MTHD_V0);
+ } head;
+};
-#define DAC_MTHD(n) (n), (n) + 0x03
+int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
+int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_dac_power(struct nv50_disp_priv *, int, u32);
-int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
-#define SOR_MTHD(n) (n), (n) + 0x3f
+int nv50_dac_power(NV50_DISP_MTHD_V1);
+int nv50_dac_sense(NV50_DISP_MTHD_V1);
-int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
-int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nva3_hda_eld(NV50_DISP_MTHD_V1);
+int nvd0_hda_eld(NV50_DISP_MTHD_V1);
-int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
-int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
-int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+int nv50_sor_power(NV50_DISP_MTHD_V1);
int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
u32, struct dcb_output *);
@@ -93,10 +96,7 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
struct dcb_output *);
-#define PIOR_MTHD(n) (n), (n) + 0x03
-
-int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_pior_power(struct nv50_disp_priv *, int, u32);
+int nv50_pior_power(NV50_DISP_MTHD_V1);
struct nv50_disp_base {
struct nouveau_parent base;
@@ -104,14 +104,19 @@ struct nv50_disp_base {
u32 chan;
};
+struct nv50_disp_chan_impl {
+ struct nouveau_ofuncs base;
+ int chid;
+ int (*attach)(struct nouveau_object *, struct nouveau_object *, u32);
+ void (*detach)(struct nouveau_object *, int);
+};
+
struct nv50_disp_chan {
struct nouveau_namedb base;
int chid;
};
-int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, int, void **);
-void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *);
u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
@@ -120,20 +125,20 @@ void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
#define nv50_disp_chan_fini(a,b) \
nouveau_namedb_fini(&(a)->base, (b))
-int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32, int, int, void **);
-void nv50_disp_dmac_dtor(struct nouveau_object *);
-
struct nv50_disp_dmac {
struct nv50_disp_chan base;
struct nouveau_dmaobj *pushdma;
u32 push;
};
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
struct nv50_disp_pioc {
struct nv50_disp_chan base;
};
+void nv50_disp_pioc_dtor(struct nouveau_object *);
+
struct nv50_disp_mthd_list {
u32 mthd;
u32 addr;
@@ -154,47 +159,67 @@ struct nv50_disp_mthd_chan {
} data[];
};
-extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs;
+int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
-extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs;
+int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
-extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
+int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
-extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
-extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
+int nv50_disp_oimm_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
+int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv50_disp_base_dtor(struct nouveau_object *);
+extern struct nouveau_omthds nv50_disp_base_omthds[];
extern struct nouveau_oclass nv50_disp_cclass;
void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
const struct nv50_disp_mthd_chan *);
void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
+extern const struct nvkm_event_func nv50_disp_vblank_func;
extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
-extern struct nouveau_omthds nv84_disp_base_omthds[];
extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
-extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
-extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
-extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
-extern struct nouveau_omthds nvd0_disp_base_omthds[];
+extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
+extern const struct nvkm_event_func nvd0_disp_vblank_func;
extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
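
The NV50_DISP_MTHD_, NV50_DISP_MTHD_V0 and NV50_DISP_MTHD_V1 macros above exist so that every method implementation and every function pointer in nv50_disp_priv and nv50_disp_impl share one authoritative parameter list. A self-contained sketch of the same trick, with illustrative names rather than the driver's:

    #include <stdio.h>

    /* One macro owns the shared parameter list; adding a parameter
     * later touches this line only, not every prototype using it. */
    #define MTHD_ARGS_      void *priv, void *data, unsigned size
    #define MTHD_ARGS_V0    MTHD_ARGS_, int head

    struct impl {
            int (*scanoutpos)(MTHD_ARGS_V0);        /* pointer and... */
    };

    static int my_scanoutpos(MTHD_ARGS_V0)          /* ...definition agree */
    {
            (void)priv; (void)data;
            printf("head %d, %u bytes\n", head, size);
            return 0;
    }

    int main(void)
    {
            struct impl impl = { .scanoutpos = my_scanoutpos };
            return impl.scanoutpos(0, 0, 0, /* head */ 1);
    }
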
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index 1cc62e434683..788ced1b6182 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -204,31 +204,17 @@ nv84_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nv84_disp_sclass[] = {
- { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
-struct nouveau_omthds
-nv84_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
-};
-
static struct nouveau_oclass
nv84_disp_base_oclass[] = {
- { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ { G82_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -276,9 +262,11 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv84_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
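
The sclass/oclass arrays being rewritten in these per-chipset files are sentinel-terminated tables mapping a class handle to its ofuncs; the empty {} entry ends the scan. A stand-alone sketch of that lookup, with invented handle values and a string standing in for the real ofuncs pointer:

    #include <stdio.h>

    struct oclass {
            unsigned handle;
            const char *ofuncs;     /* stand-in for the ofuncs pointer */
    };

    static const struct oclass sclass[] = {
            { 0x8270, "core" },     /* handle values are made up */
            { 0x827c, "base" },
            { 0x8271, "overlay" },
            {}                      /* terminator: handle 0 ends the scan */
    };

    static const char *lookup(unsigned handle)
    {
            const struct oclass *c;
            for (c = sclass; c->handle; c++)
                    if (c->handle == handle)
                            return c->ofuncs;
            return NULL;
    }

    int main(void)
    {
            printf("%s\n", lookup(0x827c));         /* prints "base" */
            return 0;
    }
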
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index 4f718a9f5aef..fa79de906eae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -63,32 +63,17 @@ nv94_disp_mast_mthd_chan = {
static struct nouveau_oclass
nv94_disp_sclass[] = {
- { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_omthds
-nv94_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
-};
-
static struct nouveau_oclass
nv94_disp_base_oclass[] = {
- { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+ { GT206_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -143,9 +128,11 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
.mthd.core = &nv94_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 6237a9a36f70..7af15f5d48dc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -80,17 +80,17 @@ nva0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nva0_disp_sclass[] = {
- { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
nva0_disp_base_oclass[] = {
- { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ { GT200_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -138,9 +138,11 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv84_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nva0_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 019124d4782b..6bd39448f8da 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -35,33 +35,17 @@
static struct nouveau_oclass
nva3_disp_sclass[] = {
- { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_omthds
-nva3_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
-};
-
static struct nouveau_oclass
nva3_disp_base_oclass[] = {
- { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+ { GT214_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -110,9 +94,11 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
.mthd.core = &nv94_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index fa30d8196f35..a4bb3c774ee1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -23,9 +23,11 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/parent.h>
#include <core/handle.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <engine/disp.h>
@@ -265,30 +267,6 @@ nvd0_disp_mast_mthd_chan = {
};
static int
-nvd0_disp_mast_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_mast_class *args = data;
- struct nv50_disp_dmac *mast;
- int ret;
-
- if (size < sizeof(*args))
- return -EINVAL;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 0, sizeof(*mast), (void **)&mast);
- *pobject = nv_object(mast);
- if (ret)
- return ret;
-
- nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
- nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
- return 0;
-}
-
-static int
nvd0_disp_mast_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
@@ -342,14 +320,18 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
return nv50_disp_chan_fini(&mast->base, suspend);
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_mast_ofuncs = {
- .ctor = nvd0_disp_mast_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nvd0_disp_mast_init,
- .fini = nvd0_disp_mast_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_mast_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nvd0_disp_mast_init,
+ .base.fini = nvd0_disp_mast_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 0,
+ .attach = nvd0_disp_dmac_object_attach,
+ .detach = nvd0_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -431,40 +413,18 @@ nvd0_disp_sync_mthd_chan = {
}
};
-static int
-nvd0_disp_sync_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_sync_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_dmac *dmac;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 1 + args->head, sizeof(*dmac),
- (void **)&dmac);
- *pobject = nv_object(dmac);
- if (ret)
- return ret;
-
- nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_sync_ofuncs = {
- .ctor = nvd0_disp_sync_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nvd0_disp_dmac_init,
- .fini = nvd0_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_sync_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nvd0_disp_dmac_init,
+ .base.fini = nvd0_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 1,
+ .attach = nvd0_disp_dmac_object_attach,
+ .detach = nvd0_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -533,40 +493,18 @@ nvd0_disp_ovly_mthd_chan = {
}
};
-static int
-nvd0_disp_ovly_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_ovly_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_dmac *dmac;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 5 + args->head, sizeof(*dmac),
- (void **)&dmac);
- *pobject = nv_object(dmac);
- if (ret)
- return ret;
-
- nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_ovly_ofuncs = {
- .ctor = nvd0_disp_ovly_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nvd0_disp_dmac_init,
- .fini = nvd0_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_ovly_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nvd0_disp_dmac_init,
+ .base.fini = nvd0_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 5,
+ .attach = nvd0_disp_dmac_object_attach,
+ .detach = nvd0_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -574,23 +512,6 @@ nvd0_disp_ovly_ofuncs = {
******************************************************************************/
static int
-nvd0_disp_pioc_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int chid,
- int length, void **pobject)
-{
- return nv50_disp_chan_create_(parent, engine, oclass, chid,
- length, pobject);
-}
-
-static void
-nvd0_disp_pioc_dtor(struct nouveau_object *object)
-{
- struct nv50_disp_pioc *pioc = (void *)object;
- nv50_disp_chan_destroy(&pioc->base);
-}
-
-static int
nvd0_disp_pioc_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
@@ -643,152 +564,68 @@ nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
* EVO immediate overlay channel objects
******************************************************************************/
-static int
-nvd0_disp_oimm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_oimm_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_pioc *pioc;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
- sizeof(*pioc), (void **)&pioc);
- *pobject = nv_object(pioc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_oimm_ofuncs = {
- .ctor = nvd0_disp_oimm_ctor,
- .dtor = nvd0_disp_pioc_dtor,
- .init = nvd0_disp_pioc_init,
- .fini = nvd0_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_oimm_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nvd0_disp_pioc_init,
+ .base.fini = nvd0_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 9,
};
/*******************************************************************************
* EVO cursor channel objects
******************************************************************************/
-static int
-nvd0_disp_curs_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_curs_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_pioc *pioc;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
- sizeof(*pioc), (void **)&pioc);
- *pobject = nv_object(pioc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_curs_ofuncs = {
- .ctor = nvd0_disp_curs_ctor,
- .dtor = nvd0_disp_pioc_dtor,
- .init = nvd0_disp_pioc_init,
- .fini = nvd0_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_curs_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nvd0_disp_pioc_init,
+ .base.fini = nvd0_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 13,
};
/*******************************************************************************
* Base display object
******************************************************************************/
-static int
-nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv04_display_scanoutpos *args = data;
- const int head = (mthd & NV50_DISP_MTHD_HEAD);
- u32 blanke, blanks, total;
-
- if (size < sizeof(*args) || head >= priv->head.nr)
- return -EINVAL;
-
- total = nv_rd32(priv, 0x640414 + (head * 0x300));
- blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
- blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
-
- args->vblanke = (blanke & 0xffff0000) >> 16;
- args->hblanke = (blanke & 0x0000ffff);
- args->vblanks = (blanks & 0xffff0000) >> 16;
- args->hblanks = (blanks & 0x0000ffff);
- args->vtotal = ( total & 0xffff0000) >> 16;
- args->htotal = ( total & 0x0000ffff);
-
- args->time[0] = ktime_to_ns(ktime_get());
- args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
- args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
- args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
- return 0;
-}
-
-static void
-nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
-{
- nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
-}
-
-static void
-nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
+int
+nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
{
- nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
-}
-
-static int
-nvd0_disp_base_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_base *base;
+ const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
+ const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
+ const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
int ret;
- ret = nouveau_parent_create(parent, engine, oclass, 0,
- priv->sclass, 0, &base);
- *pobject = nv_object(base);
- if (ret)
+ nv_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
+ args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+ args->v0.hblanke = (blanke & 0x0000ffff);
+ args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+ args->v0.hblanks = (blanks & 0x0000ffff);
+ args->v0.vtotal = ( total & 0xffff0000) >> 16;
+ args->v0.htotal = ( total & 0x0000ffff);
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ args->v0.vline = /* vline read locks hline */
+ nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline =
+ nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ } else
return ret;
- priv->base.vblank->priv = priv;
- priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
- priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
-
- return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
- &base->ramht);
-}
-
-static void
-nvd0_disp_base_dtor(struct nouveau_object *object)
-{
- struct nv50_disp_base *base = (void *)object;
- nouveau_ramht_ref(NULL, &base->ramht);
- nouveau_parent_destroy(&base->base);
+ return 0;
}
static int
@@ -874,41 +711,27 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
struct nouveau_ofuncs
nvd0_disp_base_ofuncs = {
- .ctor = nvd0_disp_base_ctor,
- .dtor = nvd0_disp_base_dtor,
+ .ctor = nv50_disp_base_ctor,
+ .dtor = nv50_disp_base_dtor,
.init = nvd0_disp_base_init,
.fini = nvd0_disp_base_fini,
-};
-
-struct nouveau_omthds
-nvd0_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
+ .mthd = nv50_disp_base_mthd,
+ .ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
nvd0_disp_base_oclass[] = {
- { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GF110_DISP, &nvd0_disp_base_ofuncs },
{}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
@@ -916,6 +739,27 @@ nvd0_disp_sclass[] = {
* Display engine implementation
******************************************************************************/
+static void
+nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+static void
+nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
+const struct nvkm_event_func
+nvd0_disp_vblank_func = {
+ .ctor = nouveau_disp_vblank_ctor,
+ .init = nvd0_disp_vblank_init,
+ .fini = nvd0_disp_vblank_fini,
+};
+
static struct nvkm_output *
exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -1343,7 +1187,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
if (stat & 0x00000001)
- nouveau_event_trigger(priv->base.vblank, 1, i);
+ nouveau_disp_vblank(&priv->base, i);
nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
nv_rd32(priv, 0x6100c0 + (i * 0x800));
}
@@ -1396,9 +1240,11 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nvd0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
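
The vblank init/fini callbacks above receive only the embedded nvkm_event and use container_of() to recover the enclosing nouveau_disp, replacing the old event->priv back-pointer. A minimal sketch of that recovery, with container_of() defined locally for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct event { int refs; };

    struct disp {
            const char *name;
            struct event vblank;    /* embedded, like nouveau_disp.vblank */
    };

    /* The callback sees only the embedded event; walk back from the
     * member address to the address of the containing struct. */
    static void vblank_init(struct event *event)
    {
            struct disp *disp = container_of(event, struct disp, vblank);
            printf("enable vblank on %s\n", disp->name);
    }

    int main(void)
    {
            struct disp disp = { .name = "disp0" };
            vblank_init(&disp.vblank);
            return 0;
    }
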
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 11328e3f5df1..47fef1e398c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -200,17 +200,17 @@ nve0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nve0_disp_sclass[] = {
- { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
nve0_disp_base_oclass[] = {
- { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GK104_DISP, &nvd0_disp_base_ofuncs },
{}
};
@@ -258,9 +258,11 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 104388081d73..04bda4ac4ed3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -35,17 +35,17 @@
static struct nouveau_oclass
nvf0_disp_sclass[] = {
- { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
nvf0_disp_base_oclass[] = {
- { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GK110_DISP, &nvd0_disp_base_ofuncs },
{}
};
@@ -93,9 +93,11 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
index ad9ba7ccec7f..a5ff00a9cedc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -78,6 +78,7 @@ nvkm_output_create_(struct nouveau_object *parent,
outp->info = *dcbE;
outp->index = index;
+ outp->or = ffs(outp->info.or) - 1;
DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
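
The new outp->or field caches ffs(outp->info.or) - 1: the DCB "or" value is a mask with one bit per output resource, and ffs() converts the lowest set bit into a 0-based index (an empty mask yields -1). A small sketch using the POSIX ffs(); the sample mask is made up:

    #include <stdio.h>
    #include <strings.h>    /* POSIX ffs() */

    int main(void)
    {
            /* ffs() returns the 1-based index of the lowest set bit,
             * so subtracting one gives the resource number. */
            unsigned or_mask = 0x4;         /* sample DCB "or" value */
            int or = ffs(or_mask) - 1;

            printf("or %d\n", or);          /* prints "or 2" */
            return 0;
    }
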
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
index bc76fbf85710..187f435ad0e2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
@@ -9,6 +9,7 @@ struct nvkm_output {
struct dcb_output info;
int index;
+ int or;
struct nouveau_i2c_port *port;
struct nouveau_i2c_port *edid;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index eb2d7789555d..6f6e2a898270 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -22,6 +22,9 @@
* Authors: Ben Skeggs
*/
+#include <core/os.h>
+#include <nvif/event.h>
+
#include <subdev/i2c.h>
#include "outpdp.h"
@@ -86,7 +89,7 @@ done:
atomic_set(&outp->lt.done, 0);
schedule_work(&outp->lt.work);
} else {
- nouveau_event_get(outp->irq);
+ nvkm_notify_get(&outp->irq);
}
if (wait) {
@@ -133,46 +136,59 @@ nvkm_output_dp_detect(struct nvkm_output_dp *outp)
}
}
-static void
-nvkm_output_dp_service_work(struct work_struct *work)
+static int
+nvkm_output_dp_hpd(struct nvkm_notify *notify)
{
- struct nvkm_output_dp *outp = container_of(work, typeof(*outp), work);
- struct nouveau_disp *disp = nouveau_disp(outp);
- int type = atomic_xchg(&outp->pending, 0);
- u32 send = 0;
-
- if (type & (NVKM_I2C_PLUG | NVKM_I2C_UNPLUG)) {
- nvkm_output_dp_detect(outp);
- if (type & NVKM_I2C_UNPLUG)
- send |= NVKM_HPD_UNPLUG;
- if (type & NVKM_I2C_PLUG)
- send |= NVKM_HPD_PLUG;
- nouveau_event_get(outp->base.conn->hpd.event);
- }
-
- if (type & NVKM_I2C_IRQ) {
- nvkm_output_dp_train(&outp->base, 0, true);
- send |= NVKM_HPD_IRQ;
+ struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
+ struct nvkm_output_dp *outp;
+ struct nouveau_disp *disp = nouveau_disp(conn);
+ const struct nvkm_i2c_ntfy_rep *line = notify->data;
+ struct nvif_notify_conn_rep_v0 rep = {};
+
+ list_for_each_entry(outp, &disp->outp, base.head) {
+ if (outp->base.conn == conn &&
+ outp->info.type == DCB_OUTPUT_DP) {
+ DBG("HPD: %d\n", line->mask);
+ nvkm_output_dp_detect(outp);
+
+ if (line->mask & NVKM_I2C_UNPLUG)
+ rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
+ if (line->mask & NVKM_I2C_PLUG)
+ rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
+
+ nvkm_event_send(&disp->hpd, rep.mask, conn->index,
+ &rep, sizeof(rep));
+ return NVKM_NOTIFY_KEEP;
+ }
}
- nouveau_event_trigger(disp->hpd, send, outp->base.info.connector);
+ WARN_ON(1);
+ return NVKM_NOTIFY_DROP;
}
static int
-nvkm_output_dp_service(void *data, u32 type, int index)
+nvkm_output_dp_irq(struct nvkm_notify *notify)
{
- struct nvkm_output_dp *outp = data;
- DBG("HPD: %d\n", type);
- atomic_or(type, &outp->pending);
- schedule_work(&outp->work);
- return NVKM_EVENT_DROP;
+ struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ const struct nvkm_i2c_ntfy_rep *line = notify->data;
+ struct nvif_notify_conn_rep_v0 rep = {
+ .mask = NVIF_NOTIFY_CONN_V0_IRQ,
+ };
+ int index = outp->base.info.connector;
+
+ DBG("IRQ: %d\n", line->mask);
+ nvkm_output_dp_train(&outp->base, 0, true);
+
+ nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+ return NVKM_NOTIFY_DROP;
}
int
_nvkm_output_dp_fini(struct nouveau_object *object, bool suspend)
{
struct nvkm_output_dp *outp = (void *)object;
- nouveau_event_put(outp->irq);
+ nvkm_notify_put(&outp->irq);
nvkm_output_dp_enable(outp, false);
return nvkm_output_fini(&outp->base, suspend);
}
@@ -189,7 +205,7 @@ void
_nvkm_output_dp_dtor(struct nouveau_object *object)
{
struct nvkm_output_dp *outp = (void *)object;
- nouveau_event_ref(NULL, &outp->irq);
+ nvkm_notify_fini(&outp->irq);
nvkm_output_destroy(&outp->base);
}
@@ -213,7 +229,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
if (ret)
return ret;
- nouveau_event_ref(NULL, &outp->base.conn->hpd.event);
+ nvkm_notify_fini(&outp->base.conn->hpd);
/* access to the aux channel is not optional... */
if (!outp->base.edid) {
@@ -238,20 +254,28 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
atomic_set(&outp->lt.done, 0);
/* link maintenance */
- ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_IRQ, outp->base.edid->index,
- nvkm_output_dp_service, outp, &outp->irq);
+ ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_irq, true,
+ &(struct nvkm_i2c_ntfy_req) {
+ .mask = NVKM_I2C_IRQ,
+ .port = outp->base.edid->index,
+ },
+ sizeof(struct nvkm_i2c_ntfy_req),
+ sizeof(struct nvkm_i2c_ntfy_rep),
+ &outp->irq);
if (ret) {
ERR("error monitoring aux irq event: %d\n", ret);
return ret;
}
- INIT_WORK(&outp->work, nvkm_output_dp_service_work);
-
/* hotplug detect, replaces gpio-based mechanism with aux events */
- ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
- outp->base.edid->index,
- nvkm_output_dp_service, outp,
- &outp->base.conn->hpd.event);
+ ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_hpd, true,
+ &(struct nvkm_i2c_ntfy_req) {
+ .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
+ .port = outp->base.edid->index,
+ },
+ sizeof(struct nvkm_i2c_ntfy_req),
+ sizeof(struct nvkm_i2c_ntfy_rep),
+ &outp->base.conn->hpd);
if (ret) {
ERR("error monitoring aux hpd events: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
index ff33ba12cb67..1fac367cc867 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
@@ -12,10 +12,7 @@ struct nvkm_output_dp {
struct nvbios_dpout info;
u8 version;
- struct nouveau_eventh *irq;
- struct nouveau_eventh *hpd;
- struct work_struct work;
- atomic_t pending;
+ struct nvkm_notify irq;
bool present;
u8 dpcd[16];
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
index fe0f256f11bf..d00f89a468a7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -143,38 +144,29 @@ nv50_pior_dp_impl = {
*****************************************************************************/
int
-nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
+nv50_pior_power(NV50_DISP_MTHD_V1)
{
- const u32 stat = data & NV50_DISP_PIOR_PWR_STATE;
- const u32 soff = (or * 0x800);
+ const u32 soff = outp->or * 0x800;
+ union {
+ struct nv50_disp_pior_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl, type;
+ int ret;
+
+ nv_ioctl(object, "disp pior pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
+ args->v0.version, args->v0.state, args->v0.type);
+ if (args->v0.type > 0x0f)
+ return -EINVAL;
+ ctrl = !!args->v0.state;
+ type = args->v0.type;
+ } else
+ return ret;
+
nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
- nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat);
+ nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ priv->pior.type[outp->or] = type;
return 0;
}
-
-int
-nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
- const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR);
- u32 *data = args;
- int ret;
-
- if (size < sizeof(u32))
- return -EINVAL;
-
- mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
- mthd &= ~NV50_DISP_PIOR_MTHD_OR;
- switch (mthd) {
- case NV50_DISP_PIOR_PWR:
- ret = priv->pior.power(priv, or, data[0]);
- priv->pior.type[or] = type;
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
index 26e9a42569c7..dbd43ae9df81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -11,6 +11,7 @@ struct nouveau_disp_impl {
struct nouveau_oclass base;
struct nouveau_oclass **outp;
struct nouveau_oclass **conn;
+ const struct nvkm_event_func *vblank;
};
#define nouveau_disp_create(p,e,c,h,i,x,d) \
@@ -39,4 +40,8 @@ int _nouveau_disp_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass *nvkm_output_oclass;
extern struct nouveau_oclass *nvkm_connector_oclass;
+int nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *);
+void nouveau_disp_vblank(struct nouveau_disp *, int head);
+int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
+
#endif
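
The vblank pointer added to nouveau_disp_impl follows the same ops-table pattern as the rest of this series: behaviour is selected per chipset by pointing at a different const struct of function pointers. A minimal sketch of the pattern, with illustrative names:

    #include <stdio.h>

    /* Per-implementation ops table, the same shape as nouveau_ofuncs
     * or nvkm_event_func: a const struct of function pointers. */
    struct event_func {
            void (*init)(int head);
            void (*fini)(int head);
    };

    static void nv50_init(int head) { printf("nv50 init head %d\n", head); }
    static void nv50_fini(int head) { printf("nv50 fini head %d\n", head); }

    static const struct event_func nv50_vblank = {
            .init = nv50_init,
            .fini = nv50_fini,
    };

    int main(void)
    {
            /* chosen once per chipset, then called generically */
            const struct event_func *func = &nv50_vblank;
            func->init(0);
            func->fini(0);
            return 0;
    }
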
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 7a1ebdfa9e1b..ddf1760c4400 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -32,77 +33,26 @@
#include "nv50.h"
int
-nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+nv50_sor_power(NV50_DISP_MTHD_V1)
{
- const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
- const u32 soff = (or * 0x800);
+ union {
+ struct nv50_disp_sor_pwr_v0 v0;
+ } *args = data;
+ const u32 soff = outp->or * 0x800;
+ u32 stat;
+ int ret;
+
+ nv_ioctl(object, "disp sor pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor pwr vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ stat = !!args->v0.state;
+ } else
+ return ret;
+
nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
return 0;
}
-
-int
-nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- const u8 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
- const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
- const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
- const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
- const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
- struct nvkm_output *outp = NULL, *temp;
- u32 data;
- int ret = -EINVAL;
-
- if (size < sizeof(u32))
- return -EINVAL;
- data = *(u32 *)args;
-
- list_for_each_entry(temp, &priv->base.outp, head) {
- if ((temp->info.hasht & 0xff) == type &&
- (temp->info.hashm & mask) == mask) {
- outp = temp;
- break;
- }
- }
-
- switch (mthd & ~0x3f) {
- case NV50_DISP_SOR_PWR:
- ret = priv->sor.power(priv, or, data);
- break;
- case NVA3_DISP_SOR_HDA_ELD:
- ret = priv->sor.hda_eld(priv, or, args, size);
- break;
- case NV84_DISP_SOR_HDMI_PWR:
- ret = priv->sor.hdmi(priv, head, or, data);
- break;
- case NV50_DISP_SOR_LVDS_SCRIPT:
- priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
- ret = 0;
- break;
- case NV94_DISP_SOR_DP_PWR:
- if (outp) {
- struct nvkm_output_dp *outpdp = (void *)outp;
- switch (data) {
- case NV94_DISP_SOR_DP_PWR_STATE_OFF:
- nouveau_event_put(outpdp->irq);
- ((struct nvkm_output_dp_impl *)nv_oclass(outp))
- ->lnk_pwr(outpdp, 0);
- atomic_set(&outpdp->lt.done, 0);
- break;
- case NV94_DISP_SOR_DP_PWR_STATE_ON:
- nvkm_output_dp_train(&outpdp->base, 0, true);
- break;
- default:
- return -EINVAL;
- }
- }
- break;
- default:
- BUG_ON(1);
- }
-
- return ret;
-}
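
nv50_sor_power() above shows the versioned-argument shape shared by all the converted methods: a union of per-version structs, an unpack check that either accepts v0 or falls through to return ret, and !!state to normalize the requested power state to 0 or 1. A self-contained sketch of that flow, with a hand-rolled stand-in for the real nvif_unpack() macro (whose exact semantics are not reproduced here):

    #include <stdio.h>
    #include <string.h>

    struct pwr_v0 {
            unsigned char version;
            unsigned char state;
    };

    /* Illustrative stand-in for the nvif_unpack() idiom: accept the
     * struct only if the buffer is large enough and the version
     * matches, otherwise fall through to the error return. */
    static int unpack_v0(const void *data, unsigned size, struct pwr_v0 *v0)
    {
            if (size < sizeof(*v0))
                    return 0;
            memcpy(v0, data, sizeof(*v0));
            return v0->version == 0;
    }

    static int sor_power(const void *data, unsigned size)
    {
            struct pwr_v0 v0;
            if (unpack_v0(data, size, &v0)) {
                    unsigned stat = !!v0.state;     /* normalize to 0/1 */
                    printf("sor pwr state %u\n", stat);
                    return 0;
            }
            return -22;     /* -EINVAL */
    }

    int main(void)
    {
            struct pwr_v0 req = { .version = 0, .state = 1 };
            return sor_power(&req, sizeof(req));
    }
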
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 05487cda84a8..39f85d627336 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 97f0e9cd3d40..7b7bbc3e459e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index 5103e88d1877..e1500f77a56a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -23,98 +23,143 @@
*/
#include <core/object.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
+#include <subdev/instmem.h>
+
+#include "priv.h"
static int
-nouveau_dmaobj_ctor(struct nouveau_object *parent,
+nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ const struct nvkm_dmaeng_impl *impl = (void *)
+ nv_oclass(nv_object(dmaobj)->engine);
+ int ret = 0;
+
+ if (nv_object(dmaobj) == parent) { /* ctor bind */
+ if (nv_mclass(parent->parent) == NV_DEVICE) {
+ /* delayed, or no, binding */
+ return 0;
+ }
+ ret = impl->bind(dmaobj, parent, pgpuobj);
+ if (ret == 0)
+ nouveau_object_ref(NULL, &parent);
+ return ret;
+ }
+
+ return impl->bind(dmaobj, parent, pgpuobj);
+}
+
+int
+nvkm_dmaobj_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void **pdata, u32 *psize,
+ int length, void **pobject)
{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct nv_dma_v0 v0;
+ } *args = *pdata;
+ struct nouveau_instmem *instmem = nouveau_instmem(parent);
+ struct nouveau_client *client = nouveau_client(parent);
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_fb *pfb = nouveau_fb(parent);
struct nouveau_dmaobj *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- struct nv_dma_class *args = data;
+ void *data = *pdata;
+ u32 size = *psize;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
-
- ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
- *pobject = nv_object(dmaobj);
+ ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ dmaobj = *pobject;
if (ret)
return ret;
- switch (args->flags & NV_DMA_TARGET_MASK) {
- case NV_DMA_TARGET_VM:
+ nv_ioctl(parent, "create dma size %d\n", *psize);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(parent, "create dma vers %d target %d access %d "
+ "start %016llx limit %016llx\n",
+ args->v0.version, args->v0.target, args->v0.access,
+ args->v0.start, args->v0.limit);
+ dmaobj->target = args->v0.target;
+ dmaobj->access = args->v0.access;
+ dmaobj->start = args->v0.start;
+ dmaobj->limit = args->v0.limit;
+ } else
+ return ret;
+
+ *pdata = data;
+ *psize = size;
+
+ if (dmaobj->start > dmaobj->limit)
+ return -EINVAL;
+
+ switch (dmaobj->target) {
+ case NV_DMA_V0_TARGET_VM:
dmaobj->target = NV_MEM_TARGET_VM;
break;
- case NV_DMA_TARGET_VRAM:
+ case NV_DMA_V0_TARGET_VRAM:
+ if (!client->super) {
+ if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
+ return -EACCES;
+ if (device->card_type >= NV_50)
+ return -EACCES;
+ }
dmaobj->target = NV_MEM_TARGET_VRAM;
break;
- case NV_DMA_TARGET_PCI:
+ case NV_DMA_V0_TARGET_PCI:
+ if (!client->super)
+ return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI;
break;
- case NV_DMA_TARGET_PCI_US:
- case NV_DMA_TARGET_AGP:
+ case NV_DMA_V0_TARGET_PCI_US:
+ case NV_DMA_V0_TARGET_AGP:
+ if (!client->super)
+ return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
}
- switch (args->flags & NV_DMA_ACCESS_MASK) {
- case NV_DMA_ACCESS_VM:
+ switch (dmaobj->access) {
+ case NV_DMA_V0_ACCESS_VM:
dmaobj->access = NV_MEM_ACCESS_VM;
break;
- case NV_DMA_ACCESS_RD:
+ case NV_DMA_V0_ACCESS_RD:
dmaobj->access = NV_MEM_ACCESS_RO;
break;
- case NV_DMA_ACCESS_WR:
+ case NV_DMA_V0_ACCESS_WR:
dmaobj->access = NV_MEM_ACCESS_WO;
break;
- case NV_DMA_ACCESS_RDWR:
+ case NV_DMA_V0_ACCESS_RDWR:
dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
- dmaobj->start = args->start;
- dmaobj->limit = args->limit;
- dmaobj->conf0 = args->conf0;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- /* delayed, or no, binding */
- break;
- default:
- ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
- if (ret == 0) {
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- }
- break;
- }
-
return ret;
}
-static struct nouveau_ofuncs
-nouveau_dmaobj_ofuncs = {
- .ctor = nouveau_dmaobj_ctor,
- .dtor = nouveau_object_destroy,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
-};
-
-struct nouveau_oclass
-nouveau_dmaobj_sclass[] = {
- { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
- {}
-};
+int
+_nvkm_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ const struct nvkm_dmaeng_impl *impl = (void *)oclass;
+ struct nouveau_dmaeng *dmaeng;
+ int ret;
+
+ ret = nouveau_engine_create(parent, engine, oclass, true, "DMAOBJ",
+ "dmaobj", &dmaeng);
+ *pobject = nv_object(dmaeng);
+ if (ret)
+ return ret;
+
+ nv_engine(dmaeng)->sclass = impl->sclass;
+ dmaeng->bind = nvkm_dmaobj_bind;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 027d8217c0fa..20c9dbfe3b2e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -23,121 +23,143 @@
*/
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
#include <subdev/vm/nv04.h>
-#include <engine/dmaobj.h>
+#include "priv.h"
-struct nv04_dmaeng_priv {
- struct nouveau_dmaeng base;
+struct nv04_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ bool clone;
+ u32 flags0;
+ u32 flags2;
};
static int
-nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng);
+ struct nv04_dmaobj_priv *priv = (void *)dmaobj;
struct nouveau_gpuobj *gpuobj;
- u32 flags0 = nv_mclass(dmaobj);
- u32 flags2 = 0x00000000;
- u64 offset = dmaobj->start & 0xfffff000;
- u64 adjust = dmaobj->start & 0x00000fff;
- u32 length = dmaobj->limit - dmaobj->start;
+ u64 offset = priv->base.start & 0xfffff000;
+ u64 adjust = priv->base.start & 0x00000fff;
+ u32 length = priv->base.limit - priv->base.start;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NV03_CHANNEL_DMA_CLASS:
- case NV10_CHANNEL_DMA_CLASS:
- case NV17_CHANNEL_DMA_CLASS:
- case NV40_CHANNEL_DMA_CLASS:
+ case NV03_CHANNEL_DMA:
+ case NV10_CHANNEL_DMA:
+ case NV17_CHANNEL_DMA:
+ case NV40_CHANNEL_DMA:
break;
default:
return -EINVAL;
}
}
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
- struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
- if (!dmaobj->start)
- return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
- offset = nv_ro32(pgt, 8 + (offset >> 10));
- offset &= 0xfffff000;
- }
+ if (priv->clone) {
+ struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaobj);
+ struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
+ if (!dmaobj->start)
+ return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
+ offset = nv_ro32(pgt, 8 + (offset >> 10));
+ offset &= 0xfffff000;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
+ *pgpuobj = gpuobj;
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
+ nv_wo32(*pgpuobj, 0x04, length);
+ nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
+ nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
+ }
+
+ return ret;
+}
+
+static int
+nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nv04_vmmgr_priv *vmm = nv04_vmmgr(engine);
+ struct nv04_dmaobj_priv *priv;
+ int ret;
+
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
+ *pobject = nv_object(priv);
+ if (ret || (ret = -ENOSYS, size))
+ return ret;
- dmaobj->target = NV_MEM_TARGET_PCI;
- dmaobj->access = NV_MEM_ACCESS_RW;
+ if (priv->base.target == NV_MEM_TARGET_VM) {
+ if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass)
+ priv->clone = true;
+ priv->base.target = NV_MEM_TARGET_PCI;
+ priv->base.access = NV_MEM_ACCESS_RW;
}
- switch (dmaobj->target) {
+ priv->flags0 = nv_mclass(priv);
+ switch (priv->base.target) {
case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00003000;
+ priv->flags0 |= 0x00003000;
break;
case NV_MEM_TARGET_PCI:
- flags0 |= 0x00023000;
+ priv->flags0 |= 0x00023000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00033000;
+ priv->flags0 |= 0x00033000;
break;
default:
return -EINVAL;
}
- switch (dmaobj->access) {
+ switch (priv->base.access) {
case NV_MEM_ACCESS_RO:
- flags0 |= 0x00004000;
+ priv->flags0 |= 0x00004000;
break;
 	case NV_MEM_ACCESS_WO:
-		flags0 |= 0x00008000;
+		priv->flags0 |= 0x00008000;
 	case NV_MEM_ACCESS_RW:
case NV_MEM_ACCESS_RW:
- flags2 |= 0x00000002;
+ priv->flags2 |= 0x00000002;
break;
default:
return -EINVAL;
}
- ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
- *pgpuobj = gpuobj;
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
- nv_wo32(*pgpuobj, 0x04, length);
- nv_wo32(*pgpuobj, 0x08, flags2 | offset);
- nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
- }
-
- return ret;
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static int
-nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv04_dmaeng_priv *priv;
- int ret;
-
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static struct nouveau_ofuncs
+nv04_dmaobj_ofuncs = {
+ .ctor = nv04_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
+};
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nv04_dmaobj_bind;
- return 0;
-}
+static struct nouveau_oclass
+nv04_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
+ {}
+};
-struct nouveau_oclass
-nv04_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
+struct nouveau_oclass *
+nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
},
-};
+ .sclass = nv04_dmaeng_sclass,
+ .bind = nv04_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 750183f7c057..a740ddba2ee2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -22,140 +22,176 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
-struct nv50_dmaeng_priv {
- struct nouveau_dmaeng base;
+#include "priv.h"
+
+struct nv50_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ u32 flags0;
+ u32 flags5;
};
static int
-nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags0 = nv_mclass(dmaobj);
- u32 flags5 = 0x00000000;
+ struct nv50_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NV50_CHANNEL_DMA_CLASS:
- case NV84_CHANNEL_DMA_CLASS:
- case NV50_CHANNEL_IND_CLASS:
- case NV84_CHANNEL_IND_CLASS:
- case NV50_DISP_MAST_CLASS:
- case NV84_DISP_MAST_CLASS:
- case NV94_DISP_MAST_CLASS:
- case NVA0_DISP_MAST_CLASS:
- case NVA3_DISP_MAST_CLASS:
- case NV50_DISP_SYNC_CLASS:
- case NV84_DISP_SYNC_CLASS:
- case NV94_DISP_SYNC_CLASS:
- case NVA0_DISP_SYNC_CLASS:
- case NVA3_DISP_SYNC_CLASS:
- case NV50_DISP_OVLY_CLASS:
- case NV84_DISP_OVLY_CLASS:
- case NV94_DISP_OVLY_CLASS:
- case NVA0_DISP_OVLY_CLASS:
- case NVA3_DISP_OVLY_CLASS:
+ case NV40_CHANNEL_DMA:
+ case NV50_CHANNEL_GPFIFO:
+ case G82_CHANNEL_GPFIFO:
+ case NV50_DISP_CORE_CHANNEL_DMA:
+ case G82_DISP_CORE_CHANNEL_DMA:
+ case GT206_DISP_CORE_CHANNEL_DMA:
+ case GT200_DISP_CORE_CHANNEL_DMA:
+ case GT214_DISP_CORE_CHANNEL_DMA:
+ case NV50_DISP_BASE_CHANNEL_DMA:
+ case G82_DISP_BASE_CHANNEL_DMA:
+ case GT200_DISP_BASE_CHANNEL_DMA:
+ case GT214_DISP_BASE_CHANNEL_DMA:
+ case NV50_DISP_OVERLAY_CHANNEL_DMA:
+ case G82_DISP_OVERLAY_CHANNEL_DMA:
+ case GT200_DISP_OVERLAY_CHANNEL_DMA:
+ case GT214_DISP_OVERLAY_CHANNEL_DMA:
break;
default:
return -EINVAL;
}
}
- if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
- dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
- dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
- dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
+ upper_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, priv->flags5);
+ }
+
+ return ret;
+}
+
+static int
+nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct nv50_dma_v0 v0;
+ } *args;
+ struct nv50_dmaobj_priv *priv;
+ u32 user, part, comp, kind;
+ int ret;
+
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+ args = data;
+
+ nv_ioctl(parent, "create nv50 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
+ "comp %d kind %02x\n", args->v0.version,
+ args->v0.priv, args->v0.part, args->v0.comp,
+ args->v0.kind);
+ user = args->v0.priv;
+ part = args->v0.part;
+ comp = args->v0.comp;
+ kind = args->v0.kind;
+ } else
+ if (size == 0) {
+ if (priv->base.target != NV_MEM_TARGET_VM) {
+ user = NV50_DMA_V0_PRIV_US;
+ part = NV50_DMA_V0_PART_256;
+ comp = NV50_DMA_V0_COMP_NONE;
+ kind = NV50_DMA_V0_KIND_PITCH;
} else {
- dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
- dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
- dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
- dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+ user = NV50_DMA_V0_PRIV_VM;
+ part = NV50_DMA_V0_PART_VM;
+ comp = NV50_DMA_V0_COMP_VM;
+ kind = NV50_DMA_V0_KIND_VM;
}
- }
+ } else
+ return ret;
- flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
- flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
- flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
- flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+ if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
+ return -EINVAL;
+ priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
+ priv->flags5 = (part << 16);
- switch (dmaobj->target) {
+ switch (priv->base.target) {
case NV_MEM_TARGET_VM:
- flags0 |= 0x00000000;
+ priv->flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00010000;
+ priv->flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags0 |= 0x00020000;
+ priv->flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00030000;
+ priv->flags0 |= 0x00030000;
break;
default:
return -EINVAL;
}
- switch (dmaobj->access) {
+ switch (priv->base.access) {
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags0 |= 0x00040000;
+ priv->flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags0 |= 0x00080000;
+ priv->flags0 |= 0x00080000;
break;
+ default:
+ return -EINVAL;
}
- ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0);
- nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
- nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
- upper_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, flags5);
- }
-
- return ret;
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static int
-nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_dmaeng_priv *priv;
- int ret;
-
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static struct nouveau_ofuncs
+nv50_dmaobj_ofuncs = {
+ .ctor = nv50_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
+};
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nv50_dmaobj_bind;
- return 0;
-}
+static struct nouveau_oclass
+nv50_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
+ {}
+};
-struct nouveau_oclass
-nv50_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
+struct nouveau_oclass *
+nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
},
-};
+ .sclass = nv50_dmaeng_sclass,
+ .bind = nv50_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index cd3970d03b80..88ec33b20048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,32 +22,35 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/device.h>
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
-struct nvc0_dmaeng_priv {
- struct nouveau_dmaeng base;
+#include "priv.h"
+
+struct nvc0_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ u32 flags0;
+ u32 flags5;
};
static int
-nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags0 = nv_mclass(dmaobj);
- u32 flags5 = 0x00000000;
+ struct nvc0_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NVA3_DISP_MAST_CLASS:
- case NVA3_DISP_SYNC_CLASS:
- case NVA3_DISP_OVLY_CLASS:
+ case GT214_DISP_CORE_CHANNEL_DMA:
+ case GT214_DISP_BASE_CHANNEL_DMA:
+ case GT214_DISP_OVERLAY_CHANNEL_DMA:
break;
default:
return -EINVAL;
@@ -55,89 +58,122 @@ nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
} else
return 0;
- if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
- dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
+ upper_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, priv->flags5);
+ }
+
+ return ret;
+}
+
+static int
+nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct gf100_dma_v0 v0;
+ } *args;
+ struct nvc0_dmaobj_priv *priv;
+ u32 kind, user, unkn;
+ int ret;
+
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+ args = data;
+
+ nv_ioctl(parent, "create gf100 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
+ args->v0.version, args->v0.priv, args->v0.kind);
+ kind = args->v0.kind;
+ user = args->v0.priv;
+ unkn = 0;
+ } else
+ if (size == 0) {
+ if (priv->base.target != NV_MEM_TARGET_VM) {
+ kind = GF100_DMA_V0_KIND_PITCH;
+ user = GF100_DMA_V0_PRIV_US;
+ unkn = 2;
} else {
- dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
- dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
- dmaobj->conf0 |= 0x00020000;
+ kind = GF100_DMA_V0_KIND_VM;
+ user = GF100_DMA_V0_PRIV_VM;
+ unkn = 0;
}
- }
+ } else
+ return ret;
- flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
- flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
- flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+ if (user > 2)
+ return -EINVAL;
+ priv->flags0 |= (kind << 22) | (user << 20);
+ priv->flags5 |= (unkn << 16);
- switch (dmaobj->target) {
+ switch (priv->base.target) {
case NV_MEM_TARGET_VM:
- flags0 |= 0x00000000;
+ priv->flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00010000;
+ priv->flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags0 |= 0x00020000;
+ priv->flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00030000;
+ priv->flags0 |= 0x00030000;
break;
default:
return -EINVAL;
}
- switch (dmaobj->access) {
+ switch (priv->base.access) {
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags0 |= 0x00040000;
+ priv->flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags0 |= 0x00080000;
+ priv->flags0 |= 0x00080000;
break;
}
- ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0);
- nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
- nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
- upper_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, flags5);
- }
-
- return ret;
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static int
-nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_dmaeng_priv *priv;
- int ret;
-
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static struct nouveau_ofuncs
+nvc0_dmaobj_ofuncs = {
+ .ctor = nvc0_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
+};
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nvc0_dmaobj_bind;
- return 0;
-}
+static struct nouveau_oclass
+nvc0_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs },
+ {}
+};
-struct nouveau_oclass
-nvc0_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
+struct nouveau_oclass *
+nvc0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
},
-};
+ .sclass = nvc0_dmaeng_sclass,
+ .bind = nvc0_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 1cfb3bb90131..3fc4f0b0eaca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -22,40 +22,40 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/device.h>
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
-struct nvd0_dmaeng_priv {
- struct nouveau_dmaeng base;
+#include "priv.h"
+
+struct nvd0_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ u32 flags0;
};
static int
-nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags0 = 0x00000000;
+ struct nvd0_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NVD0_DISP_MAST_CLASS:
- case NVD0_DISP_SYNC_CLASS:
- case NVD0_DISP_OVLY_CLASS:
- case NVE0_DISP_MAST_CLASS:
- case NVE0_DISP_SYNC_CLASS:
- case NVE0_DISP_OVLY_CLASS:
- case NVF0_DISP_MAST_CLASS:
- case NVF0_DISP_SYNC_CLASS:
- case NVF0_DISP_OVLY_CLASS:
- case GM107_DISP_MAST_CLASS:
- case GM107_DISP_SYNC_CLASS:
- case GM107_DISP_OVLY_CLASS:
+ case GF110_DISP_CORE_CHANNEL_DMA:
+ case GK104_DISP_CORE_CHANNEL_DMA:
+ case GK110_DISP_CORE_CHANNEL_DMA:
+ case GM107_DISP_CORE_CHANNEL_DMA:
+ case GF110_DISP_BASE_CHANNEL_DMA:
+ case GK104_DISP_BASE_CHANNEL_DMA:
+ case GK110_DISP_BASE_CHANNEL_DMA:
+ case GF110_DISP_OVERLAY_CONTROL_DMA:
+ case GK104_DISP_OVERLAY_CONTROL_DMA:
break;
default:
return -EINVAL;
@@ -63,33 +63,11 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
} else
return 0;
- if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
- dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
- } else {
- dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
- dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
- }
- }
-
- flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
- flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
-
- switch (dmaobj->target) {
- case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00000009;
- break;
- default:
- return -EINVAL;
- break;
- }
-
ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0);
- nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
- nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+ nv_wo32(*pgpuobj, 0x00, priv->flags0);
+ nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
+ nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
nv_wo32(*pgpuobj, 0x0c, 0x00000000);
nv_wo32(*pgpuobj, 0x10, 0x00000000);
nv_wo32(*pgpuobj, 0x14, 0x00000000);
@@ -99,30 +77,91 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
}
static int
-nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nvd0_dmaeng_priv *priv;
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct gf110_dma_v0 v0;
+ } *args;
+ struct nvd0_dmaobj_priv *priv;
+ u32 kind, page;
int ret;
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
+ args = data;
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nvd0_dmaobj_bind;
- return 0;
+ nv_ioctl(parent, "create gf110 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+		nv_ioctl(parent, "create gf110 dma vers %d page %d kind %02x\n",
+ args->v0.version, args->v0.page, args->v0.kind);
+ kind = args->v0.kind;
+ page = args->v0.page;
+ } else
+ if (size == 0) {
+ if (priv->base.target != NV_MEM_TARGET_VM) {
+ kind = GF110_DMA_V0_KIND_PITCH;
+ page = GF110_DMA_V0_PAGE_SP;
+ } else {
+ kind = GF110_DMA_V0_KIND_VM;
+ page = GF110_DMA_V0_PAGE_LP;
+ }
+ } else
+ return ret;
+
+ if (page > 1)
+ return -EINVAL;
+ priv->flags0 = (kind << 20) | (page << 6);
+
+ switch (priv->base.target) {
+ case NV_MEM_TARGET_VRAM:
+ priv->flags0 |= 0x00000009;
+ break;
+ case NV_MEM_TARGET_VM:
+ case NV_MEM_TARGET_PCI:
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ /* XXX: don't currently know how to construct a real one
+ * of these. we only use them to represent pushbufs
+ * on these chipsets, and the classes that use them
+ * deal with the target themselves.
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-struct nouveau_oclass
-nvd0_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0xd0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
- },
+static struct nouveau_ofuncs
+nvd0_dmaobj_ofuncs = {
+ .ctor = nvd0_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
};
+
+static struct nouveau_oclass
+nvd0_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs },
+ {}
+};
+
+struct nouveau_oclass *
+nvd0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0xd0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
+ },
+ .sclass = nvd0_dmaeng_sclass,
+ .bind = nvd0_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
new file mode 100644
index 000000000000..36f743866937
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
@@ -0,0 +1,30 @@
+#ifndef __NVKM_DMAOBJ_PRIV_H__
+#define __NVKM_DMAOBJ_PRIV_H__
+
+#include <engine/dmaobj.h>
+
+#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
+ nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
+
+int nvkm_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void **, u32 *,
+ int, void **);
+#define _nvkm_dmaobj_dtor nouveau_object_destroy
+#define _nvkm_dmaobj_init nouveau_object_init
+#define _nvkm_dmaobj_fini nouveau_object_fini
+
+int _nvkm_dmaeng_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+#define _nvkm_dmaeng_dtor _nouveau_engine_dtor
+#define _nvkm_dmaeng_init _nouveau_engine_init
+#define _nvkm_dmaeng_fini _nouveau_engine_fini
+
+struct nvkm_dmaeng_impl {
+ struct nouveau_oclass base;
+ struct nouveau_oclass *sclass;
+ int (*bind)(struct nouveau_dmaobj *, struct nouveau_object *,
+ struct nouveau_gpuobj **);
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 56ed3d73bf8e..0f999fc45ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -26,11 +26,30 @@
#include <core/object.h>
#include <core/handle.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/event.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+static int
+nouveau_fifo_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ if (size == 0) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = 0;
+ return 0;
+ }
+ return -ENOSYS;
+}
+
+static const struct nvkm_event_func
+nouveau_fifo_event_func = {
+ .ctor = nouveau_fifo_event_ctor,
+};
+
int
nouveau_fifo_channel_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -59,14 +78,14 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
dmaeng = (void *)chan->pushdma->base.engine;
switch (chan->pushdma->base.oclass->handle) {
- case NV_DMA_FROM_MEMORY_CLASS:
- case NV_DMA_IN_MEMORY_CLASS:
+ case NV_DMA_FROM_MEMORY:
+ case NV_DMA_IN_MEMORY:
break;
default:
return -EINVAL;
}
- ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
if (ret)
return ret;
@@ -85,15 +104,10 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
return -ENOSPC;
}
- /* map fifo control registers */
- chan->user = ioremap(nv_device_resource_start(device, bar) + addr +
- (chan->chid * size), size);
- if (!chan->user)
- return -EFAULT;
-
- nouveau_event_trigger(priv->cevent, 1, 0);
-
+ chan->addr = nv_device_resource_start(device, bar) +
+ addr + size * chan->chid;
chan->size = size;
+ nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
return 0;
}
@@ -103,7 +117,8 @@ nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
unsigned long flags;
- iounmap(chan->user);
+ if (chan->user)
+ iounmap(chan->user);
spin_lock_irqsave(&priv->lock, flags);
priv->channel[chan->chid] = NULL;
@@ -121,10 +136,24 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
nouveau_fifo_channel_destroy(chan);
}
+int
+_nouveau_fifo_channel_map(struct nouveau_object *object, u64 *addr, u32 *size)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ *addr = chan->addr;
+ *size = chan->size;
+ return 0;
+}
+
u32
_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_fifo_chan *chan = (void *)object;
+ if (unlikely(!chan->user)) {
+ chan->user = ioremap(chan->addr, chan->size);
+ if (WARN_ON_ONCE(chan->user == NULL))
+ return 0;
+ }
return ioread32_native(chan->user + addr);
}
@@ -132,9 +161,57 @@ void
_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_fifo_chan *chan = (void *)object;
+ if (unlikely(!chan->user)) {
+ chan->user = ioremap(chan->addr, chan->size);
+ if (WARN_ON_ONCE(chan->user == NULL))
+ return;
+ }
iowrite32_native(data, chan->user + addr);
}
+int
+nouveau_fifo_uevent_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ union {
+ struct nvif_notify_uevent_req none;
+ } *req = data;
+ int ret;
+
+ if (nvif_unvers(req->none)) {
+ notify->size = sizeof(struct nvif_notify_uevent_rep);
+ notify->types = 1;
+ notify->index = 0;
+ }
+
+ return ret;
+}
+
+void
+nouveau_fifo_uevent(struct nouveau_fifo *fifo)
+{
+ struct nvif_notify_uevent_rep rep = {
+ };
+ nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
+}
+
+int
+_nouveau_fifo_channel_ntfy(struct nouveau_object *object, u32 type,
+ struct nvkm_event **event)
+{
+ struct nouveau_fifo *fifo = (void *)object->engine;
+ switch (type) {
+ case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+ if (nv_mclass(object) >= G82_CHANNEL_DMA) {
+ *event = &fifo->uevent;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
static int
nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
{
@@ -168,8 +245,8 @@ void
nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
kfree(priv->channel);
- nouveau_event_destroy(&priv->uevent);
- nouveau_event_destroy(&priv->cevent);
+ nvkm_event_fini(&priv->uevent);
+ nvkm_event_fini(&priv->cevent);
nouveau_engine_destroy(&priv->base);
}
@@ -194,11 +271,7 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
- ret = nouveau_event_create(1, 1, &priv->cevent);
- if (ret)
- return ret;
-
- ret = nouveau_event_create(1, 1, &priv->uevent);
+ ret = nvkm_event_init(&nouveau_fifo_event_func, 1, 1, &priv->cevent);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index c61b16a63884..5ae6a43893b5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/namedb.h>
#include <core/handle.h>
@@ -117,16 +118,23 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->pushbuf,
+ 0x10000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR), &chan);
@@ -134,13 +142,15 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -242,13 +252,15 @@ nv04_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv04_fifo_sclass[] = {
- { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
+ { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
{}
};
@@ -539,7 +551,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status & 0x40000000) {
- nouveau_event_trigger(priv->base.uevent, 1, 0);
+ nouveau_fifo_uevent(&priv->base);
nv_wr32(priv, 0x002100, 0x40000000);
status &= ~0x40000000;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 571a22aa1ae5..2a32add51c81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
@@ -59,16 +60,23 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->pushbuf,
+ 0x10000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR), &chan);
@@ -76,13 +84,15 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -100,13 +110,15 @@ nv10_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv10_fifo_sclass[] = {
- { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs },
+ { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index f25760209316..12d76c8adb23 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
@@ -64,16 +65,23 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->pushbuf,
+ 0x10000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -83,13 +91,15 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 64;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -107,13 +117,15 @@ nv17_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv17_fifo_sclass[] = {
- { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs },
+ { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 343487ed2238..9f49c3a24dc6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
@@ -182,16 +183,23 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x1000, args->pushbuf,
+ 0x1000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -200,14 +208,16 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nv40_fifo_context_attach;
nv_parent(chan)->context_detach = nv40_fifo_context_detach;
nv_parent(chan)->object_attach = nv40_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 128;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -226,13 +236,15 @@ nv40_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv40_fifo_sclass[] = {
- { NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs },
+ { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index e6352bd5b4ff..5d1e86bc244c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -25,7 +25,8 @@
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -194,17 +195,24 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -213,6 +221,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
@@ -223,10 +233,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
if (ret)
return ret;
- nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x3c, 0x003f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
@@ -247,18 +257,26 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_channel_ind_class *args = data;
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
u64 ioffset, ilength;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -267,6 +285,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
@@ -277,8 +297,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
if (ret)
return ret;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
@@ -343,8 +363,10 @@ nv50_fifo_ofuncs_dma = {
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_ofuncs
@@ -353,14 +375,16 @@ nv50_fifo_ofuncs_ind = {
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv50_fifo_sclass[] = {
- { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
- { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
+ { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
+ { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 6e5ac16e5460..1f42996b354a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -27,7 +27,8 @@
#include <core/engctx.h>
#include <core/ramht.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -160,17 +161,24 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -186,6 +194,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
&chan->ramht);
if (ret)
@@ -196,10 +206,10 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x3c, 0x003f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
@@ -222,18 +232,26 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
- struct nv50_channel_ind_class *args = data;
u64 ioffset, ilength;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -249,6 +267,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
&chan->ramht);
if (ret)
@@ -259,8 +279,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
@@ -304,8 +324,10 @@ nv84_fifo_ofuncs_dma = {
.dtor = nv50_fifo_chan_dtor,
.init = nv84_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_ofuncs
@@ -314,14 +336,16 @@ nv84_fifo_ofuncs_ind = {
.dtor = nv50_fifo_chan_dtor,
.init = nv84_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv84_fifo_sclass[] = {
- { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma },
- { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind },
+ { G82_CHANNEL_DMA, &nv84_fifo_ofuncs_dma },
+ { G82_CHANNEL_GPFIFO, &nv84_fifo_ofuncs_ind },
{}
};
@@ -389,19 +413,26 @@ nv84_fifo_cclass = {
******************************************************************************/
static void
-nv84_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
+nv84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nv84_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
}
static void
-nv84_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
+nv84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nv84_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
}
+static const struct nvkm_event_func
+nv84_fifo_uevent_func = {
+ .ctor = nouveau_fifo_uevent_ctor,
+ .init = nv84_fifo_uevent_init,
+ .fini = nv84_fifo_uevent_fini,
+};
+
static int
nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -425,9 +456,9 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.uevent->enable = nv84_fifo_uevent_enable;
- priv->base.uevent->disable = nv84_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ ret = nvkm_event_init(&nv84_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ if (ret)
+ return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
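
The fifo constructors in this series all move to the same pattern: the raw `size < sizeof(*args)` check is replaced by nvif_unpack(), which validates a versioned argument struct, and the allocated channel id is reported back through args->v0.chid. A minimal sketch of that contract follows; the struct layout and the validate helper are illustrative stand-ins, not the real definitions from <nvif/class.h> and <nvif/unpack.h> (the real macro additionally updates `ret` and consumes the unpacked bytes from data/size).

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative stand-ins only -- the real layout and macro live in
 * <nvif/class.h> and <nvif/unpack.h>. */
struct demo_channel_gpfifo_v0 {
	__u8  version;		/* checked against [vers_lo, vers_hi] */
	__u8  chid;		/* written back by the constructor    */
	__u32 pushbuf;
	__u64 ioffset;
	__u32 ilength;
};

static int
demo_unpack(void *data, __u32 size, struct demo_channel_gpfifo_v0 *v0,
	    int vers_lo, int vers_hi)
{
	if (size < sizeof(*v0))
		return -ENOSYS;		/* not this version's shape   */
	memcpy(v0, data, sizeof(*v0));
	if (v0->version < vers_lo || v0->version > vers_hi)
		return -ENOSYS;		/* caller may try another vN  */
	return 0;			/* args validated, safe to use */
}
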
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ae4a4dc5642a..1fe1f8fbda0c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -28,7 +28,8 @@
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -187,20 +188,28 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nvc0_fifo_priv *priv = (void *)engine;
struct nvc0_fifo_base *base = (void *)parent;
struct nvc0_fifo_chan *chan;
- struct nv50_channel_ind_class *args = data;
u64 usermem, ioffset, ilength;
int ret, i;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x1000,
- args->pushbuf,
+ args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_COPY0) |
@@ -212,12 +221,14 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
usermem = chan->base.chid * 0x1000;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
for (i = 0; i < 0x1000; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
@@ -291,13 +302,15 @@ nvc0_fifo_ofuncs = {
.dtor = _nouveau_fifo_channel_dtor,
.init = nvc0_fifo_chan_init,
.fini = nvc0_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nvc0_fifo_sclass[] = {
- { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
+ { FERMI_CHANNEL_GPFIFO, &nvc0_fifo_ofuncs },
{}
};
@@ -654,7 +667,7 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
object = engctx;
while (object) {
switch (nv_mclass(object)) {
- case NVC0_CHANNEL_IND_CLASS:
+ case FERMI_CHANNEL_GPFIFO:
nvc0_fifo_recover(priv, engine, (void *)object);
break;
}
@@ -730,7 +743,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
- nouveau_event_trigger(priv->base.uevent, 1, 0);
+ nouveau_fifo_uevent(&priv->base);
ints &= ~1;
}
if (ints) {
@@ -827,19 +840,26 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nvc0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
+nvc0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nvc0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}
static void
-nvc0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
+nvc0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nvc0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}
+static const struct nvkm_event_func
+nvc0_fifo_uevent_func = {
+ .ctor = nouveau_fifo_uevent_ctor,
+ .init = nvc0_fifo_uevent_init,
+ .fini = nvc0_fifo_uevent_fini,
+};
+
static int
nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -877,9 +897,9 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.uevent->enable = nvc0_fifo_uevent_enable;
- priv->base.uevent->disable = nvc0_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ ret = nvkm_event_init(&nvc0_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ if (ret)
+ return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nvc0_fifo_intr;
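
The uevent rework visible above (and repeated for nve0 below) replaces the nouveau_event enable/disable/priv triple with a shared const struct nvkm_event_func table; because the event is embedded in struct nouveau_fifo, the handlers recover their fifo with container_of() instead of an untyped backpointer. A minimal sketch of that recovery pattern, with placeholder types:

#include <linux/kernel.h>	/* container_of() */

struct demo_event {
	const struct demo_event_func *func;
};

struct demo_fifo {
	/* ... engine state ... */
	struct demo_event uevent;	/* embedded, not pointed-to */
};

static void
demo_uevent_init(struct demo_event *event)
{
	/* Recover the containing fifo from the member's address; no
	 * ->priv field is needed and the conversion is type-checked. */
	struct demo_fifo *fifo =
		container_of(event, struct demo_fifo, uevent);

	(void)fifo;	/* e.g. nv_mask(fifo, 0x002140, bit, bit); */
}
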
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 298063edb92d..d2f0fd39c145 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -28,7 +28,8 @@
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -216,46 +217,56 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct kepler_channel_gpfifo_a_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nve0_fifo_priv *priv = (void *)engine;
struct nve0_fifo_base *base = (void *)parent;
struct nve0_fifo_chan *chan;
- struct nve0_channel_ind_class *args = data;
u64 usermem, ioffset, ilength;
int ret, i;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x engine %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength, args->v0.engine);
+ } else
+ return ret;
for (i = 0; i < FIFO_ENGINE_NR; i++) {
- if (args->engine & (1 << i)) {
+ if (args->v0.engine & (1 << i)) {
if (nouveau_engine(parent, fifo_engine[i].subdev)) {
- args->engine = (1 << i);
+ args->v0.engine = (1 << i);
break;
}
}
}
if (i == FIFO_ENGINE_NR) {
- nv_error(priv, "unsupported engines 0x%08x\n", args->engine);
+ nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
return -ENODEV;
}
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x200,
- args->pushbuf,
+ args->v0.pushbuf,
fifo_engine[i].mask, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nve0_fifo_context_attach;
nv_parent(chan)->context_detach = nve0_fifo_context_detach;
chan->engine = i;
usermem = chan->base.chid * 0x200;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
for (i = 0; i < 0x200; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
@@ -325,13 +336,15 @@ nve0_fifo_ofuncs = {
.dtor = _nouveau_fifo_channel_dtor,
.init = nve0_fifo_chan_init,
.fini = nve0_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nve0_fifo_sclass[] = {
- { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
+ { KEPLER_CHANNEL_GPFIFO_A, &nve0_fifo_ofuncs },
{}
};
@@ -769,7 +782,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
object = engctx;
while (object) {
switch (nv_mclass(object)) {
- case NVE0_CHANNEL_IND_CLASS:
+ case KEPLER_CHANNEL_GPFIFO_A:
nve0_fifo_recover(priv, engine, (void *)object);
break;
}
@@ -859,7 +872,7 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
static void
nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
{
- nouveau_event_trigger(priv->base.uevent, 1, 0);
+ nouveau_fifo_uevent(&priv->base);
}
static void
@@ -952,19 +965,26 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nve0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
+nve0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nve0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}
static void
-nve0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
+nve0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nve0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}
+static const struct nvkm_event_func
+nve0_fifo_uevent_func = {
+ .ctor = nouveau_fifo_uevent_ctor,
+ .init = nve0_fifo_uevent_init,
+ .fini = nve0_fifo_uevent_fini,
+};
+
int
nve0_fifo_fini(struct nouveau_object *object, bool suspend)
{
@@ -1067,9 +1087,9 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.uevent->enable = nve0_fifo_uevent_enable;
- priv->base.uevent->disable = nve0_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ ret = nvkm_event_init(&nve0_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ if (ret)
+ return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nve0_fifo_intr;
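
The nve0 constructor's engine-selection loop above narrows the caller's engine bitmask to the first bit whose engine is actually present, rewriting args->v0.engine so userspace can see which engine was chosen. A standalone sketch of that narrowing (the presence test is a placeholder for the driver's nouveau_engine() lookup against fifo_engine[]):

#include <stdbool.h>

#define DEMO_ENGINE_NR 8

/* Placeholder presence test standing in for nouveau_engine(). */
static bool demo_engine_present(int i) { return i == 2 || i == 5; }

/* Returns the selected engine index, or -1; *mask is narrowed in
 * place, mirroring how args->v0.engine is rewritten above. */
static int
demo_pick_engine(unsigned int *mask)
{
	int i;
	for (i = 0; i < DEMO_ENGINE_NR; i++) {
		if ((*mask & (1u << i)) && demo_engine_present(i)) {
			*mask = 1u << i;
			return i;
		}
	}
	return -1;	/* the driver returns -ENODEV here */
}
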
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c
new file mode 100644
index 000000000000..3adb7fe91772
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "ctxnvc0.h"
+
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+gk110b_grctx_init_sm_0[] = {
+ { 0x419e04, 1, 0x04, 0x00000000 },
+ { 0x419e08, 1, 0x04, 0x0000001d },
+ { 0x419e0c, 1, 0x04, 0x00000000 },
+ { 0x419e10, 1, 0x04, 0x00001c02 },
+ { 0x419e44, 1, 0x04, 0x0013eff2 },
+ { 0x419e48, 1, 0x04, 0x00000000 },
+ { 0x419e4c, 1, 0x04, 0x0000007f },
+ { 0x419e50, 2, 0x04, 0x00000000 },
+ { 0x419e58, 1, 0x04, 0x00000001 },
+ { 0x419e5c, 3, 0x04, 0x00000000 },
+ { 0x419e68, 1, 0x04, 0x00000002 },
+ { 0x419e6c, 12, 0x04, 0x00000000 },
+ { 0x419eac, 1, 0x04, 0x00001f8f },
+ { 0x419eb0, 1, 0x04, 0x0db00d2f },
+ { 0x419eb8, 1, 0x04, 0x00000000 },
+ { 0x419ec8, 1, 0x04, 0x0001304f },
+ { 0x419f30, 4, 0x04, 0x00000000 },
+ { 0x419f40, 1, 0x04, 0x00000018 },
+ { 0x419f44, 3, 0x04, 0x00000000 },
+ { 0x419f58, 1, 0x04, 0x00000000 },
+ { 0x419f70, 1, 0x04, 0x00006300 },
+ { 0x419f78, 1, 0x04, 0x000000eb },
+ { 0x419f7c, 1, 0x04, 0x00000404 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gk110b_grctx_pack_tpc[] = {
+ { nvd7_grctx_init_pe_0 },
+ { nvf0_grctx_init_tex_0 },
+ { nvf0_grctx_init_mpc_0 },
+ { nvf0_grctx_init_l1c_0 },
+ { gk110b_grctx_init_sm_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+struct nouveau_oclass *
+gk110b_grctx_oclass = &(struct nvc0_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0xf1),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+ .main = nve4_grctx_generate_main,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nvf0_grctx_pack_hub,
+ .gpc = nvf0_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = gk110b_grctx_pack_tpc,
+ .ppc = nvf0_grctx_pack_ppc,
+ .icmd = nvf0_grctx_pack_icmd,
+ .mthd = nvf0_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x600,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
+}.base;
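
The register tables in this new file use the four-field nvc0_graph_init format: { addr, count, pitch, data } describes `count` writes of `data` starting at `addr` and stepping by `pitch`, with an empty {} entry terminating the list. A self-contained sketch of the expansion, assuming this reading of the fields (the driver's own replay is nvc0_graph_mmio()):

#include <stdint.h>

struct demo_graph_init {
	uint32_t addr;
	uint8_t  count;
	uint8_t  pitch;
	uint32_t data;
};

static void demo_wr32(uint32_t addr, uint32_t data)
{
	(void)addr; (void)data;		/* MMIO write elided */
}

static void
demo_graph_mmio(const struct demo_graph_init *init)
{
	for (; init->count; init++) {	/* {} terminator has count 0 */
		uint32_t addr = init->addr, i;
		for (i = 0; i < init->count; i++, addr += init->pitch)
			demo_wr32(addr, init->data);
	}
}
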
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
index 224ee0287ab7..36fc9831cc93 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
@@ -41,7 +41,6 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nve4_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nve4_grctx_pack_hub,
.gpc = nve4_grctx_pack_gpc,
@@ -50,4 +49,15 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nve4_grctx_pack_ppc,
.icmd = nve4_grctx_pack_icmd,
.mthd = gk20a_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .bundle_min_gpm_fifo_depth = 0x62,
+ .bundle_token_limit = 0x100,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x240,
+ .attrib_nr = 0x240,
+ .alpha_nr_max = 0x648 + (0x648 / 2),
+ .alpha_nr = 0x648,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
index b0d0fb2f4d08..62e918b9fa81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -859,45 +859,74 @@ gm107_grctx_pack_ppc[] = {
******************************************************************************/
static void
-gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+gm107_grctx_generate_bundle(struct nvc0_grctx *info)
{
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
-
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x418e30, 0x80000000, 0, 0);
-
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418e24, 0x00000000, 8, 0);
- mmio_list(0x418e28, 0x80000030, 0, 0);
-
- mmio_list(0x4064c8, 0x018002c0, 0, 0);
-
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
- mmio_list(0x419c2c, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x0aa01000, 0, 0);
- mmio_list(0x4064c4, 0x0400ffff, 0, 0);
-
- /*XXX*/
- mmio_list(0x5030c0, 0x00001540, 0, 0);
- mmio_list(0x5030f4, 0x00000000, 0, 0);
- mmio_list(0x5030e4, 0x00002000, 0, 0);
- mmio_list(0x5030f8, 0x00003fc0, 0, 0);
- mmio_list(0x418ea0, 0x07151540, 0, 0);
-
- mmio_list(0x5032c0, 0x00001540, 0, 0);
- mmio_list(0x5032f4, 0x00001fe0, 0, 0);
- mmio_list(0x5032e4, 0x00002000, 0, 0);
- mmio_list(0x5032f8, 0x00006fc0, 0, 0);
- mmio_list(0x418ea4, 0x07151540, 0, 0);
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
+ impl->bundle_size / 0x20);
+ const u32 token_limit = impl->bundle_token_limit;
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ mmio_refn(info, 0x408004, 0x00000000, s, b);
+ mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_refn(info, 0x418e24, 0x00000000, s, b);
+ mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
+}
+
+static void
+gm107_grctx_generate_pagepool(struct nvc0_grctx *info)
+{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+ mmio_wr32(info, 0x4064cc, 0x80000000);
+ mmio_wr32(info, 0x418e30, 0x80000000); /* guess at it being related */
+}
+
+static void
+gm107_grctx_generate_attrib(struct nvc0_grctx *info)
+{
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = (void *)nvc0_grctx_impl(priv);
+ const u32 alpha = impl->alpha_nr;
+ const u32 attrib = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int max_batches = 0xffff;
+ u32 bo = 0;
+ u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+ int gpc, ppc, n = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) {
+ const u32 as = alpha * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 u = 0x418ea0 + (n * 0x04);
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ mmio_wr32(info, o + 0xc0, bs);
+ mmio_wr32(info, o + 0xf4, bo);
+ bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, as);
+ mmio_wr32(info, o + 0xf8, ao);
+ ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, u, (0x715 /*XXX*/ << 16) | bs);
+ }
+ }
}
static void
@@ -934,7 +963,9 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
gm107_grctx_generate_tpcid(priv);
@@ -979,7 +1010,6 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = gm107_grctx_generate_main,
- .mods = gm107_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = gm107_grctx_pack_hub,
.gpc = gm107_grctx_pack_gpc,
@@ -988,4 +1018,15 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = gm107_grctx_pack_ppc,
.icmd = gm107_grctx_pack_icmd,
.mthd = gm107_grctx_pack_mthd,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x2c0,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0xff0,
+ .attrib_nr = 0xaa0,
+ .alpha_nr_max = 0x1800,
+ .alpha_nr = 0x1000,
}.base;
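
The bundle generator above replaces a magic constant with a derived one: 0x4064c8 receives (state_limit << 16) | token_limit, where state_limit is clamped to bundle_size / 0x20 so it never exceeds what the circular buffer can hold. Plugging in gm107's oclass values (bundle_size 0x3000, gpm fifo depth 0x180, token limit 0x2c0) reproduces 0x018002c0 -- the constant the deleted gm107_grctx_generate_mods() wrote verbatim. As a quick check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t bundle_size = 0x3000;	/* gm107_grctx_oclass */
	const uint32_t min_gpm_fifo_depth = 0x180;
	const uint32_t token_limit = 0x2c0;
	const uint32_t state_limit =
		min_gpm_fifo_depth < bundle_size / 0x20 ?
		min_gpm_fifo_depth : bundle_size / 0x20;

	/* Prints 0x018002c0, matching the old hardcoded value. */
	printf("0x%08x\n",
	       (unsigned)((state_limit << 16) | token_limit));
	return 0;
}
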
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
index 8de4a4291548..ce252adbef81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -531,50 +531,6 @@ nv108_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
-nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
-{
- u32 magic[GPC_MAX][2];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000030, 0, 0);
- mmio_list(0x4064c8, 0x00c20200, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x02180648, 0, 0);
- mmio_list(0x4064c4, 0x0192ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
- u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * priv->tpc_nr[gpc];
- }
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * priv->tpc_nr[gpc];
- }
-
- mmio_list(0x17e91c, 0x0b040a0b, 0, 0);
- mmio_list(0x17e920, 0x00090d08, 0, 0);
-}
-
struct nouveau_oclass *
nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
@@ -587,7 +543,6 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nv108_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nv108_grctx_pack_hub,
.gpc = nv108_grctx_pack_gpc,
@@ -596,4 +551,15 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nv108_grctx_pack_ppc,
.icmd = nv108_grctx_pack_icmd,
.mthd = nvf0_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0xc2,
+ .bundle_token_limit = 0x200,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 833a96508c4e..b8e5fe60a1eb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -982,34 +982,93 @@ nvc0_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
+int
+nvc0_grctx_mmio_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
+{
+ if (info->data) {
+ info->buffer[info->buffer_nr] = round_up(info->addr, align);
+ info->addr = info->buffer[info->buffer_nr] + size;
+ info->data->size = size;
+ info->data->align = align;
+ info->data->access = access;
+ info->data++;
+ return info->buffer_nr++;
+ }
+ return -1;
+}
+
+void
+nvc0_grctx_mmio_item(struct nvc0_grctx *info, u32 addr, u32 data,
+ int shift, int buffer)
+{
+ if (info->data) {
+ if (shift >= 0) {
+ info->mmio->addr = addr;
+ info->mmio->data = data;
+ info->mmio->shift = shift;
+ info->mmio->buffer = buffer;
+ if (buffer >= 0)
+ data |= info->buffer[buffer] >> shift;
+ info->mmio++;
+ } else
+ return;
+ } else {
+ if (buffer >= 0)
+ return;
+ }
+
+ nv_wr32(info->priv, addr, data);
+}
+
+void
+nvc0_grctx_generate_bundle(struct nvc0_grctx *info)
+{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ mmio_refn(info, 0x408004, 0x00000000, s, b);
+ mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_refn(info, 0x418808, 0x00000000, s, b);
+ mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
+}
+
void
-nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+nvc0_grctx_generate_pagepool(struct nvc0_grctx *info)
{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+}
+
+void
+nvc0_grctx_generate_attrib(struct nvc0_grctx *info)
+{
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ const u32 attrib = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
int gpc, tpc;
- u32 offset;
-
- mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
-
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000018, 0, 0);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000018, 0, 0);
-
- mmio_list(0x405830, 0x02180000, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+ u32 bo = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (attrib << 16));
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- u32 addr = TPC_UNIT(gpc, tpc, 0x0520);
- mmio_list(addr, 0x02180000 | offset, 0, 0);
- offset += 0x0324;
+ const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
+ mmio_skip(info, o, (attrib << 16) | ++bo);
+ mmio_wr32(info, o, (attrib << 16) | --bo);
+ bo += impl->attrib_nr_max;
}
}
}
@@ -1170,7 +1229,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
- nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_mmio(priv, oclass->hub);
nvc0_graph_mmio(priv, oclass->gpc);
@@ -1180,7 +1239,9 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
nvc0_grctx_generate_tpcid(priv);
@@ -1192,7 +1253,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nvc0_graph_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
nvc0_graph_mthd(priv, oclass->mthd);
- nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
}
int
@@ -1308,7 +1369,6 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
.unkn = nvc0_grctx_generate_unkn,
.hub = nvc0_grctx_pack_hub,
.gpc = nvc0_grctx_pack_gpc,
@@ -1316,4 +1376,11 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc0_grctx_pack_tpc,
.icmd = nvc0_grctx_pack_icmd,
.mthd = nvc0_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc0_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
index 8da8b627b9d0..c776cd715e33 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
@@ -12,12 +12,19 @@ struct nvc0_grctx {
u64 addr;
};
+int nvc0_grctx_mmio_data(struct nvc0_grctx *, u32 size, u32 align, u32 access);
+void nvc0_grctx_mmio_item(struct nvc0_grctx *, u32 addr, u32 data, int s, int);
+
+#define mmio_vram(a,b,c,d) nvc0_grctx_mmio_data((a), (b), (c), (d))
+#define mmio_refn(a,b,c,d,e) nvc0_grctx_mmio_item((a), (b), (c), (d), (e))
+#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
+#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
+
struct nvc0_grctx_oclass {
struct nouveau_oclass base;
/* main context generation function */
void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
/* context-specific modify-on-first-load list generation function */
- void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *);
void (*unkn)(struct nvc0_graph_priv *);
/* mmio context data */
const struct nvc0_graph_pack *hub;
@@ -28,30 +35,34 @@ struct nvc0_grctx_oclass {
/* indirect context data, generated with icmds/mthds */
const struct nvc0_graph_pack *icmd;
const struct nvc0_graph_pack *mthd;
+ /* bundle circular buffer */
+ void (*bundle)(struct nvc0_grctx *);
+ u32 bundle_size;
+ u32 bundle_min_gpm_fifo_depth;
+ u32 bundle_token_limit;
+ /* pagepool */
+ void (*pagepool)(struct nvc0_grctx *);
+ u32 pagepool_size;
+ /* attribute(/alpha) circular buffer */
+ void (*attrib)(struct nvc0_grctx *);
+ u32 attrib_nr_max;
+ u32 attrib_nr;
+ u32 alpha_nr_max;
+ u32 alpha_nr;
};
-#define mmio_data(s,a,p) do { \
- info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
- info->addr = info->buffer[info->buffer_nr++] + (s); \
- info->data->size = (s); \
- info->data->align = (a); \
- info->data->access = (p); \
- info->data++; \
-} while(0)
-
-#define mmio_list(r,d,s,b) do { \
- info->mmio->addr = (r); \
- info->mmio->data = (d); \
- info->mmio->shift = (s); \
- info->mmio->buffer = (b); \
- info->mmio++; \
- nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \
-} while(0)
+static inline const struct nvc0_grctx_oclass *
+nvc0_grctx_impl(struct nvc0_graph_priv *priv)
+{
+ return (void *)nv_engine(priv)->cclass;
+}
extern struct nouveau_oclass *nvc0_grctx_oclass;
int nvc0_grctx_generate(struct nvc0_graph_priv *);
void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc0_grctx_generate_bundle(struct nvc0_grctx *);
+void nvc0_grctx_generate_pagepool(struct nvc0_grctx *);
+void nvc0_grctx_generate_attrib(struct nvc0_grctx *);
void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *);
void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *);
void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *);
@@ -60,22 +71,27 @@ void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *);
void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *);
extern struct nouveau_oclass *nvc1_grctx_oclass;
-void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc1_grctx_generate_attrib(struct nvc0_grctx *);
void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *);
extern struct nouveau_oclass *nvc4_grctx_oclass;
extern struct nouveau_oclass *nvc8_grctx_oclass;
+
extern struct nouveau_oclass *nvd7_grctx_oclass;
+void nvd7_grctx_generate_attrib(struct nvc0_grctx *);
+
extern struct nouveau_oclass *nvd9_grctx_oclass;
extern struct nouveau_oclass *nve4_grctx_oclass;
extern struct nouveau_oclass *gk20a_grctx_oclass;
void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nve4_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nve4_grctx_generate_bundle(struct nvc0_grctx *);
+void nve4_grctx_generate_pagepool(struct nvc0_grctx *);
void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
extern struct nouveau_oclass *nvf0_grctx_oclass;
+extern struct nouveau_oclass *gk110b_grctx_oclass;
extern struct nouveau_oclass *nv108_grctx_oclass;
extern struct nouveau_oclass *gm107_grctx_oclass;
@@ -160,16 +176,23 @@ extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[];
extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[];
extern const struct nvc0_graph_init nve4_grctx_init_a097_0[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_icmd[];
+
extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_hub[];
extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_gpc[];
extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[];
+extern const struct nvc0_graph_init nvf0_grctx_init_tex_0[];
extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[];
extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_ppc[];
+
extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[];
extern const struct nvc0_graph_init nv108_grctx_init_prop_0[];
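
The macros added above map onto the two helpers introduced in ctxnvc0.c: mmio_vram() reserves an aligned slice of context memory and returns its buffer index; mmio_refn() records a register write whose data is OR'd with buffer_address >> shift; mmio_wr32() is a plain write (shift 0, no buffer); and mmio_skip(), with shift -1, is dropped while an mmio list is being recorded and only hits the hardware when the generator runs directly. A short usage sketch with those helpers, mirroring the pagepool generators earlier in the diff (assumes the surrounding ctxnvc0.h declarations are in scope):

/* Usage sketch; helper names as defined above. */
static void
demo_generate_pagepool(struct nvc0_grctx *info, u32 pagepool_size)
{
	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
	const int s = 8;	/* stored addresses are shifted >> 8 */
	const int b = mmio_vram(info, pagepool_size, 1 << s, access);

	mmio_refn(info, 0x40800c, 0x00000000, s, b); /* base addr >> s */
	mmio_wr32(info, 0x408010, 0x80000000);	     /* plain write    */
}
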
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index 24a92c569c0a..c6ba8fed18f1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -727,38 +727,38 @@ nvc1_grctx_pack_tpc[] = {
******************************************************************************/
void
-nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+nvc1_grctx_generate_attrib(struct nvc0_grctx *info)
{
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ const u32 alpha = impl->alpha_nr;
+ const u32 beta = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int timeslice_mode = 1;
+ const int max_batches = 0xffff;
+ u32 bo = 0;
+ u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
int gpc, tpc;
- u32 offset;
- mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000018, 0, 0);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000018, 0, 0);
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (beta << 16) | alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
- mmio_list(0x405830, 0x02180218, 0, 0);
- mmio_list(0x4064c4, 0x0086ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- u32 addr = TPC_UNIT(gpc, tpc, 0x0520);
- mmio_list(addr, 0x12180000 | offset, 0, 0);
- offset += 0x0324;
- }
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- u32 addr = TPC_UNIT(gpc, tpc, 0x0544);
- mmio_list(addr, 0x02180000 | offset, 0, 0);
- offset += 0x0324;
+ const u32 a = alpha;
+ const u32 b = beta;
+ const u32 t = timeslice_mode;
+ const u32 o = TPC_UNIT(gpc, tpc, 0x500);
+ mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
+ mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
+ bo += impl->attrib_nr_max;
+ mmio_wr32(info, o + 0x44, (a << 16) | ao);
+ ao += impl->alpha_nr_max;
}
}
}
@@ -786,7 +786,6 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc1_grctx_generate_mods,
.unkn = nvc1_grctx_generate_unkn,
.hub = nvc1_grctx_pack_hub,
.gpc = nvc1_grctx_pack_gpc,
@@ -794,4 +793,13 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc1_grctx_pack_tpc,
.icmd = nvc1_grctx_pack_icmd,
.mthd = nvc1_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc1_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x324,
+ .alpha_nr = 0x218,
}.base;
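
One idiom shared by the nvc1/nvd7-style attrib generators deserves a note: the beta (attrib) and alpha circular buffers live in a single allocation, with the alpha region starting at attrib_nr_max * tpc_total, and each unit advances its own bo/ao offsets by nr_max times its TPC count. The mmio_skip()/mmio_wr32() pair writes offset+1 and then offset, so the direct-execution pass toggles the register while the recorded list keeps only the final value. A self-contained sketch of the offset bookkeeping, with placeholder unit counts rather than a real GPU configuration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t attrib_nr_max = 0x324, alpha_nr_max = 0x7ff;
	const uint32_t attrib = 0x218, alpha = 0x324, t = 1;
	const uint32_t ppc_tpc_nr[] = { 2, 2 };	/* placeholder units */
	const uint32_t tpc_total = 4;
	uint32_t bo = 0;				/* beta offset    */
	uint32_t ao = attrib_nr_max * tpc_total;	/* alpha follows  */
	unsigned n;

	for (n = 0; n < 2; n++) {
		uint32_t b = attrib * ppc_tpc_nr[n];
		uint32_t a = alpha * ppc_tpc_nr[n];
		/* the values the recorded list keeps for each unit */
		printf("unit %u: 0xc0 <- %08x, 0xe4 <- %08x\n", n,
		       (unsigned)((t << 28) | (b << 16) | bo),
		       (unsigned)((a << 16) | ao));
		bo += attrib_nr_max * ppc_tpc_nr[n];
		ao += alpha_nr_max * ppc_tpc_nr[n];
	}
	return 0;
}
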
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
index e11ed5538193..41705c60cc47 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
@@ -92,7 +92,6 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
.unkn = nvc0_grctx_generate_unkn,
.hub = nvc0_grctx_pack_hub,
.gpc = nvc0_grctx_pack_gpc,
@@ -100,4 +99,11 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc4_grctx_pack_tpc,
.icmd = nvc0_grctx_pack_icmd,
.mthd = nvc0_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc0_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
index feebd58dfe8d..8f804cd8f9c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
@@ -343,7 +343,6 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
.unkn = nvc0_grctx_generate_unkn,
.hub = nvc0_grctx_pack_hub,
.gpc = nvc8_grctx_pack_gpc,
@@ -351,4 +350,11 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc0_grctx_pack_tpc,
.icmd = nvc8_grctx_pack_icmd,
.mthd = nvc8_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc0_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 1dbc8d7f2e86..fcf534fd9e65 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -177,44 +177,41 @@ nvd7_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
-nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+void
+nvd7_grctx_generate_attrib(struct nvc0_grctx *info)
{
- u32 magic[GPC_MAX][2];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000018, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000018, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ const u32 alpha = impl->alpha_nr;
+ const u32 beta = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int timeslice_mode = 1;
+ const int max_batches = 0xffff;
+ u32 bo = 0;
+ u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+ int gpc, ppc;
- mmio_list(0x405830, 0x02180324, 0, 0);
- mmio_list(0x4064c4, 0x00c9ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
- u16 magic1 = 0x0324 * priv->tpc_nr[gpc];
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * priv->tpc_nr[gpc];
- }
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (beta << 16) | alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * priv->tpc_nr[gpc];
+ for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) {
+ const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 b = beta * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 t = timeslice_mode;
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
+ mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
+ bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, (a << 16) | ao);
+ ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ }
}
- mmio_list(0x17e91c, 0x03060609, 0, 0); /* different from kepler */
}
void
@@ -223,7 +220,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
int i;
- nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_mmio(priv, oclass->hub);
nvc0_graph_mmio(priv, oclass->gpc);
@@ -233,7 +230,9 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
nvc0_grctx_generate_tpcid(priv);
@@ -248,7 +247,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nvc0_graph_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
nvc0_graph_mthd(priv, oclass->mthd);
- nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
}
struct nouveau_oclass *
@@ -263,7 +262,6 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvd7_grctx_generate_main,
- .mods = nvd7_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nvd7_grctx_pack_hub,
.gpc = nvd7_grctx_pack_gpc,
@@ -272,4 +270,13 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nvd7_grctx_pack_ppc,
.icmd = nvd9_grctx_pack_icmd,
.mthd = nvd9_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x324,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index c665fb7e4660..b9a301b6fd9f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -511,7 +511,6 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc1_grctx_generate_mods,
.unkn = nvc1_grctx_generate_unkn,
.hub = nvd9_grctx_pack_hub,
.gpc = nvd9_grctx_pack_gpc,
@@ -519,4 +518,13 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvd9_grctx_pack_tpc,
.icmd = nvd9_grctx_pack_icmd,
.mthd = nvd9_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc1_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x324,
+ .alpha_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
index c5b249238587..ccac2ee1a1cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
@@ -839,47 +839,34 @@ nve4_grctx_pack_ppc[] = {
******************************************************************************/
void
-nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+nve4_grctx_generate_bundle(struct nvc0_grctx *info)
{
- u32 magic[GPC_MAX][2];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000030, 0, 0);
- mmio_list(0x4064c8, 0x01800600, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x02180648, 0, 0);
- mmio_list(0x4064c4, 0x0192ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
- u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * priv->tpc_nr[gpc];
- }
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * priv->tpc_nr[gpc];
- }
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
+ impl->bundle_size / 0x20);
+ const u32 token_limit = impl->bundle_token_limit;
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ mmio_refn(info, 0x408004, 0x00000000, s, b);
+ mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_refn(info, 0x418808, 0x00000000, s, b);
+ mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
+}
- mmio_list(0x17e91c, 0x06060609, 0, 0);
- mmio_list(0x17e920, 0x00090a05, 0, 0);
+void
+nve4_grctx_generate_pagepool(struct nvc0_grctx *info)
+{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+ mmio_wr32(info, 0x4064cc, 0x80000000);
}
void
@@ -957,7 +944,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
int i;
- nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_mmio(priv, oclass->hub);
nvc0_graph_mmio(priv, oclass->gpc);
@@ -967,7 +954,9 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
nvc0_grctx_generate_tpcid(priv);
@@ -991,7 +980,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nvc0_graph_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
nvc0_graph_mthd(priv, oclass->mthd);
- nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
@@ -1009,7 +998,6 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nve4_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nve4_grctx_pack_hub,
.gpc = nve4_grctx_pack_gpc,
@@ -1018,4 +1006,15 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nve4_grctx_pack_ppc,
.icmd = nve4_grctx_pack_icmd,
.mthd = nve4_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x600,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index dec03f04114d..e9b0dcf95a49 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -279,7 +279,7 @@ nvf0_grctx_init_icmd_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_icmd[] = {
{ nvf0_grctx_init_icmd_0 },
{}
@@ -668,7 +668,7 @@ nvf0_grctx_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_hub[] = {
{ nvc0_grctx_init_main_0 },
{ nvf0_grctx_init_fe_0 },
@@ -704,7 +704,7 @@ nvf0_grctx_init_gpc_unk_2[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_gpc[] = {
{ nvc0_grctx_init_gpc_unk_0 },
{ nvd9_grctx_init_prop_0 },
@@ -718,7 +718,7 @@ nvf0_grctx_pack_gpc[] = {
{}
};
-static const struct nvc0_graph_init
+const struct nvc0_graph_init
nvf0_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000000f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
@@ -797,7 +797,7 @@ nvf0_grctx_init_cbm_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_ppc[] = {
{ nve4_grctx_init_pes_0 },
{ nvf0_grctx_init_cbm_0 },
@@ -809,58 +809,6 @@ nvf0_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
-nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
-{
- u32 magic[GPC_MAX][4];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000030, 0, 0);
- mmio_list(0x4064c8, 0x01800600, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x02180648, 0, 0);
- mmio_list(0x4064c4, 0x0192ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * (priv->tpc_nr[gpc] - 1);
- u16 magic1 = 0x0648 * (priv->tpc_nr[gpc] - 1);
- u16 magic2 = 0x0218;
- u16 magic3 = 0x0648;
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * (priv->tpc_nr[gpc] - 1);
- magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset;
- magic[gpc][3] = 0x00000000 | (magic3 << 16);
- offset += 0x0324;
- }
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * (priv->tpc_nr[gpc] - 1);
- mmio_list(GPC_UNIT(gpc, 0x32c0), magic[gpc][2], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x32e4), magic[gpc][3] | offset, 0, 0);
- offset += 0x07ff;
- }
-
- mmio_list(0x17e91c, 0x06060609, 0, 0);
- mmio_list(0x17e920, 0x00090a05, 0, 0);
-}
-
struct nouveau_oclass *
nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf0),
@@ -873,7 +821,6 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nvf0_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nvf0_grctx_pack_hub,
.gpc = nvf0_grctx_pack_gpc,
@@ -882,4 +829,15 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nvf0_grctx_pack_ppc,
.icmd = nvf0_grctx_pack_icmd,
.mthd = nvf0_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x7c0,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c
new file mode 100644
index 000000000000..d07b19dc168d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+#include "ctxnvc0.h"
+
+/*******************************************************************************
+ * PGRAPH register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+gk110b_graph_init_l1c_0[] = {
+ { 0x419c98, 1, 0x04, 0x00000000 },
+ { 0x419ca8, 1, 0x04, 0x00000000 },
+ { 0x419cb0, 1, 0x04, 0x09000000 },
+ { 0x419cb4, 1, 0x04, 0x00000000 },
+ { 0x419cb8, 1, 0x04, 0x00b08bea },
+ { 0x419c84, 1, 0x04, 0x00010384 },
+ { 0x419cbc, 1, 0x04, 0x281b3646 },
+ { 0x419cc0, 2, 0x04, 0x00000000 },
+ { 0x419c80, 1, 0x04, 0x00020230 },
+ { 0x419ccc, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gk110b_graph_init_sm_0[] = {
+ { 0x419e00, 1, 0x04, 0x00000080 },
+ { 0x419ea0, 1, 0x04, 0x00000000 },
+ { 0x419ee4, 1, 0x04, 0x00000000 },
+ { 0x419ea4, 1, 0x04, 0x00000100 },
+ { 0x419ea8, 1, 0x04, 0x00000000 },
+ { 0x419eb4, 1, 0x04, 0x00000000 },
+ { 0x419ebc, 2, 0x04, 0x00000000 },
+ { 0x419edc, 1, 0x04, 0x00000000 },
+ { 0x419f00, 1, 0x04, 0x00000000 },
+ { 0x419ed0, 1, 0x04, 0x00002616 },
+ { 0x419f74, 1, 0x04, 0x00015555 },
+ { 0x419f80, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gk110b_graph_pack_mmio[] = {
+ { nve4_graph_init_main_0 },
+ { nvf0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvd9_graph_init_pd_0 },
+ { nvf0_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvf0_graph_init_sked_0 },
+ { nvf0_graph_init_cwd_0 },
+ { nvd9_graph_init_prop_0 },
+ { nvc1_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvd9_graph_init_gpm_0 },
+ { nvf0_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nve4_graph_init_tpccs_0 },
+ { nvf0_graph_init_tex_0 },
+ { nve4_graph_init_pe_0 },
+ { gk110b_graph_init_l1c_0 },
+ { nvc0_graph_init_mpc_0 },
+ { gk110b_graph_init_sm_0 },
+ { nvd7_graph_init_pes_0 },
+ { nvd7_graph_init_wwdx_0 },
+ { nvd7_graph_init_cbm_0 },
+ { nve4_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+struct nouveau_oclass *
+gk110b_graph_oclass = &(struct nvc0_graph_oclass) {
+ .base.handle = NV_ENGINE(GR, 0xf1),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_ctor,
+ .dtor = nvc0_graph_dtor,
+ .init = nve4_graph_init,
+ .fini = nvf0_graph_fini,
+ },
+ .cclass = &gk110b_grctx_oclass,
+ .sclass = nvf0_graph_sclass,
+ .mmio = gk110b_graph_pack_mmio,
+ .fecs.ucode = &nvf0_graph_fecs_ucode,
+ .gpccs.ucode = &nvf0_graph_gpccs_ucode,
+ .ppc_nr = 2,
+}.base;
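
For readers tracing the register tables above: each {addr, count, pitch, data} entry in an nvc0_graph_init list describes a run of identical register writes, and a pack is simply an array of such lists. Below is a minimal, self-contained sketch of that expansion, assuming the four-field layout the tables imply; mmio_write() and the struct name are illustrative stand-ins, not the driver's own code.

    #include <stdint.h>

    struct graph_init {
    	uint32_t addr;
    	uint8_t  count;
    	uint8_t  pitch;
    	uint32_t data;
    };

    static void mmio_write(uint32_t addr, uint32_t data)
    {
    	/* stand-in for the driver's nv_wr32(priv, addr, data) */
    	(void)addr; (void)data;
    }

    static void expand_init_list(const struct graph_init *init)
    {
    	for (; init->addr; init++) {	/* a zeroed {} entry ends the list */
    		uint32_t addr = init->addr;
    		uint32_t next = addr + init->count * init->pitch;

    		while (addr < next) {
    			mmio_write(addr, init->data);
    			addr += init->pitch;
    		}
    	}
    }

Read this way, the gk110b entry { 0x419cc0, 2, 0x04, 0x00000000 } above expands to a zero write at 0x419cc0 and another at 0x419cc4.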
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
index 83048a56430d..7d0abe9f3fe7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
@@ -27,8 +27,8 @@ static struct nouveau_oclass
gk20a_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa040, &nouveau_object_ofuncs },
- { 0xa297, &nouveau_object_ofuncs },
- { 0xa0c0, &nouveau_object_ofuncs },
+ { KEPLER_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -39,9 +39,10 @@ gk20a_graph_oclass = &(struct nvc0_graph_oclass) {
.ctor = nvc0_graph_ctor,
.dtor = nvc0_graph_dtor,
.init = nve4_graph_init,
- .fini = nve4_graph_fini,
+ .fini = _nouveau_graph_fini,
},
.cclass = &gk20a_grctx_oclass,
.sclass = gk20a_graph_sclass,
.mmio = nve4_graph_pack_mmio,
+ .ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
index 21c5f31d607f..4bdbdab2fd9a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
@@ -36,8 +36,8 @@ static struct nouveau_oclass
gm107_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa140, &nouveau_object_ofuncs },
- { 0xb097, &nouveau_object_ofuncs },
- { 0xb0c0, &nouveau_object_ofuncs },
+ { MAXWELL_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { MAXWELL_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -425,6 +425,9 @@ gm107_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400134, 0xffffffff);
nv_wr32(priv, 0x400054, 0x2c350f63);
+
+ nvc0_graph_zbc_init(priv);
+
return nvc0_graph_init_ctxctl(priv);
}
@@ -462,4 +465,5 @@ gm107_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = gm107_graph_pack_mmio,
.fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL,
.gpccs.ucode = &gm107_graph_gpccs_ucode,
+ .ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index ad13dcdd15f9..f70e2f67a4dd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/handle.h>
#include <core/namedb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 4532f7e5618c..2b12b09683c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/handle.h>
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
index 00ea1a089822..2b0e8f48c029 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -33,7 +33,7 @@ static struct nouveau_oclass
nv108_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa140, &nouveau_object_ofuncs },
- { 0xa197, &nouveau_object_ofuncs },
+ { KEPLER_B, &nvc0_fermi_ofuncs },
{ 0xa1c0, &nouveau_object_ofuncs },
{}
};
@@ -220,4 +220,5 @@ nv108_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = nv108_graph_pack_mmio,
.fecs.ucode = &nv108_graph_fecs_ucode,
.gpccs.ucode = &nv108_graph_gpccs_ucode,
+ .ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index d145e080899a..ceb9c746d94e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,6 +1,5 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
index 7a80d005a974..f8a6fdd7d5e8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
index 3e1f32ee43d4..5de9caa2ef67 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
index e451db32e92a..2f9dbc709389 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
index 9385ac7b44a4..34dd26c70b64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
index 9ce84b73f86a..2fb5756d9f66 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 6477fbf6a550..4f401174868d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/handle.h>
#include <core/engctx.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 20665c21d80e..38e0aa26f1cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/client.h>
#include <core/handle.h>
#include <core/engctx.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index aa0838916354..30fd1dc64f93 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -26,15 +26,232 @@
#include "ctxnvc0.h"
/*******************************************************************************
+ * Zero Bandwidth Clear
+ ******************************************************************************/
+
+static void
+nvc0_graph_zbc_clear_color(struct nvc0_graph_priv *priv, int zbc)
+{
+ if (priv->zbc_color[zbc].format) {
+ nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
+ nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
+ nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
+ nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
+ }
+ nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
+ nv_wr32(priv, 0x405820, zbc);
+ nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
+}
+
+static int
+nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format,
+ const u32 ds[4], const u32 l2[4])
+{
+ struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ int zbc = -ENOSPC, i;
+
+ for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+ if (priv->zbc_color[i].format) {
+ if (priv->zbc_color[i].format != format)
+ continue;
+ if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
+ priv->zbc_color[i].ds)))
+ continue;
+ if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
+ priv->zbc_color[i].l2))) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return i;
+ } else {
+ zbc = (zbc < 0) ? i : zbc;
+ }
+ }
+
+ if (zbc < 0)
+ return zbc;
+
+ memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
+ memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
+ priv->zbc_color[zbc].format = format;
+ ltc->zbc_color_get(ltc, zbc, l2);
+ nvc0_graph_zbc_clear_color(priv, zbc);
+ return zbc;
+}
+
+static void
+nvc0_graph_zbc_clear_depth(struct nvc0_graph_priv *priv, int zbc)
+{
+ if (priv->zbc_depth[zbc].format)
+ nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
+ nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
+ nv_wr32(priv, 0x405820, zbc);
+ nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
+}
+
+static int
+nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format,
+ const u32 ds, const u32 l2)
+{
+ struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ int zbc = -ENOSPC, i;
+
+ for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+ if (priv->zbc_depth[i].format) {
+ if (priv->zbc_depth[i].format != format)
+ continue;
+ if (priv->zbc_depth[i].ds != ds)
+ continue;
+ if (priv->zbc_depth[i].l2 != l2) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return i;
+ } else {
+ zbc = (zbc < 0) ? i : zbc;
+ }
+ }
+
+ if (zbc < 0)
+ return zbc;
+
+ priv->zbc_depth[zbc].format = format;
+ priv->zbc_depth[zbc].ds = ds;
+ priv->zbc_depth[zbc].l2 = l2;
+ ltc->zbc_depth_get(ltc, zbc, l2);
+ nvc0_graph_zbc_clear_depth(priv, zbc);
+ return zbc;
+}
+
+/*******************************************************************************
* Graphics object classes
******************************************************************************/
+static int
+nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size)
+{
+ struct nvc0_graph_priv *priv = (void *)object->engine;
+ union {
+ struct fermi_a_zbc_color_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ switch (args->v0.format) {
+ case FERMI_A_ZBC_COLOR_V0_FMT_ZERO:
+ case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32:
+ case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
+ case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
+ ret = nvc0_graph_zbc_color_get(priv, args->v0.format,
+ args->v0.ds,
+ args->v0.l2);
+ if (ret >= 0) {
+ args->v0.index = ret;
+ return 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void *data, u32 size)
+{
+ struct nvc0_graph_priv *priv = (void *)object->engine;
+ union {
+ struct fermi_a_zbc_depth_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ switch (args->v0.format) {
+ case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
+ ret = nvc0_graph_zbc_depth_get(priv, args->v0.format,
+ args->v0.ds,
+ args->v0.l2);
+ return (ret >= 0) ? 0 : -ENOSPC;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvc0_fermi_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
+{
+ switch (mthd) {
+ case FERMI_A_ZBC_COLOR:
+ return nvc0_fermi_mthd_zbc_color(object, data, size);
+ case FERMI_A_ZBC_DEPTH:
+ return nvc0_fermi_mthd_zbc_depth(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+struct nouveau_ofuncs
+nvc0_fermi_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ .mthd = nvc0_fermi_mthd,
+};
+
+static int
+nvc0_graph_set_shader_exceptions(struct nouveau_object *object, u32 mthd,
+ void *pdata, u32 size)
+{
+ struct nvc0_graph_priv *priv = (void *)nv_engine(object);
+ if (size >= sizeof(u32)) {
+ u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
+ nv_wr32(priv, 0x419e44, data);
+ nv_wr32(priv, 0x419e4c, data);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+struct nouveau_omthds
+nvc0_graph_9097_omthds[] = {
+ { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
+ {}
+};
+
+struct nouveau_omthds
+nvc0_graph_90c0_omthds[] = {
+ { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
+ {}
+};
+
struct nouveau_oclass
nvc0_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0x9039, &nouveau_object_ofuncs },
- { 0x9097, &nouveau_object_ofuncs },
- { 0x90c0, &nouveau_object_ofuncs },
+ { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -98,7 +315,7 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
u32 addr = mmio->addr;
u32 data = mmio->data;
- if (mmio->shift) {
+ if (mmio->buffer >= 0) {
u64 info = chan->data[mmio->buffer].vma.offset;
data |= info >> mmio->shift;
}
@@ -407,6 +624,35 @@ nvc0_graph_pack_mmio[] = {
******************************************************************************/
void
+nvc0_graph_zbc_init(struct nvc0_graph_priv *priv)
+{
+ const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+ const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
+ const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+ const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
+ 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
+ struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ int index;
+
+ if (!priv->zbc_color[0].format) {
+ nvc0_graph_zbc_color_get(priv, 1, & zero[0], &zero[4]);
+ nvc0_graph_zbc_color_get(priv, 2, & one[0], &one[4]);
+ nvc0_graph_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]);
+ nvc0_graph_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]);
+ nvc0_graph_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
+ nvc0_graph_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
+ }
+
+ for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
+ nvc0_graph_zbc_clear_color(priv, index);
+ for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
+ nvc0_graph_zbc_clear_depth(priv, index);
+}
+
+void
nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
{
const struct nvc0_graph_pack *pack;
@@ -969,17 +1215,16 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass;
struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
- u32 r000260;
int i;
if (priv->firmware) {
/* load fuc microcode */
- r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
&priv->fuc409d);
nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
&priv->fuc41ad);
- nv_wr32(priv, 0x000260, r000260);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
/* start both of them running */
nv_wr32(priv, 0x409840, 0xffffffff);
@@ -1066,7 +1311,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
/* load HUB microcode */
- r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nv_wr32(priv, 0x4091c0, 0x01000000);
for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
@@ -1089,7 +1334,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x41a188, i >> 6);
nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
}
- nv_wr32(priv, 0x000260, r000260);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
/* load register lists */
nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
@@ -1224,6 +1469,9 @@ nvc0_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400134, 0xffffffff);
nv_wr32(priv, 0x400054, 0x34ce3464);
+
+ nvc0_graph_zbc_init(priv);
+
return nvc0_graph_init_ctxctl(priv);
}
@@ -1287,7 +1535,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
bool use_ext_fw, enable;
- int ret, i;
+ int ret, i, j;
use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW",
oclass->fecs.ucode == NULL);
@@ -1333,6 +1581,11 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
for (i = 0; i < priv->gpc_nr; i++) {
priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
priv->tpc_total += priv->tpc_nr[i];
+ priv->ppc_nr[i] = oclass->ppc_nr;
+ for (j = 0; j < priv->ppc_nr[i]; j++) {
+ u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
+ priv->ppc_tpc_nr[i][j] = hweight8(mask);
+ }
}
/*XXX: these need figuring out... though it might not even matter */
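
The ZBC helpers added above share one allocation policy: scan the LTC's slot range for an entry that already holds this format and clear value, and otherwise claim the first free slot before programming the tables. A simplified sketch of that policy follows; the l2 consistency check and the hardware writes are omitted for brevity, and the types are stand-ins for the driver's own.

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct zbc_color { uint32_t format; uint32_t ds[4]; uint32_t l2[4]; };

    static int zbc_color_slot(struct zbc_color *tbl, int min, int max,
    			  uint32_t format, const uint32_t ds[4],
    			  const uint32_t l2[4])
    {
    	int free = -ENOSPC, i;

    	for (i = min; i <= max; i++) {
    		if (!tbl[i].format) {		/* remember first free slot */
    			if (free < 0)
    				free = i;
    			continue;
    		}
    		if (tbl[i].format == format &&
    		    !memcmp(tbl[i].ds, ds, sizeof(tbl[i].ds)))
    			return i;		/* already programmed */
    	}
    	if (free < 0)
    		return free;			/* table full: -ENOSPC */

    	tbl[free].format = format;
    	memcpy(tbl[free].ds, ds, sizeof(tbl[free].ds));
    	memcpy(tbl[free].l2, l2, sizeof(tbl[free].l2));
    	return free;
    }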
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index ffc289198dd8..7ed9e89c3435 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -30,10 +30,15 @@
#include <core/gpuobj.h>
#include <core/option.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
+#include <subdev/mc.h>
+#include <subdev/ltc.h>
#include <engine/fifo.h>
#include <engine/graph.h>
@@ -60,7 +65,7 @@ struct nvc0_graph_mmio {
u32 addr;
u32 data;
u32 shift;
- u32 buffer;
+ int buffer;
};
struct nvc0_graph_fuc {
@@ -68,6 +73,18 @@ struct nvc0_graph_fuc {
u32 size;
};
+struct nvc0_graph_zbc_color {
+ u32 format;
+ u32 ds[4];
+ u32 l2[4];
+};
+
+struct nvc0_graph_zbc_depth {
+ u32 format;
+ u32 ds;
+ u32 l2;
+};
+
struct nvc0_graph_priv {
struct nouveau_graph base;
@@ -77,10 +94,15 @@ struct nvc0_graph_priv {
struct nvc0_graph_fuc fuc41ad;
bool firmware;
+ struct nvc0_graph_zbc_color zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT];
+ struct nvc0_graph_zbc_depth zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
+
u8 rop_nr;
u8 gpc_nr;
u8 tpc_nr[GPC_MAX];
u8 tpc_total;
+ u8 ppc_nr[GPC_MAX];
+ u8 ppc_tpc_nr[GPC_MAX][4];
struct nouveau_gpuobj *unk4188b4;
struct nouveau_gpuobj *unk4188b8;
@@ -118,12 +140,20 @@ int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_object **);
void nvc0_graph_dtor(struct nouveau_object *);
int nvc0_graph_init(struct nouveau_object *);
+void nvc0_graph_zbc_init(struct nvc0_graph_priv *);
+
int nve4_graph_fini(struct nouveau_object *, bool);
int nve4_graph_init(struct nouveau_object *);
-extern struct nouveau_oclass nvc0_graph_sclass[];
+int nvf0_graph_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_ofuncs nvc0_fermi_ofuncs;
+extern struct nouveau_oclass nvc0_graph_sclass[];
+extern struct nouveau_omthds nvc0_graph_9097_omthds[];
+extern struct nouveau_omthds nvc0_graph_90c0_omthds[];
extern struct nouveau_oclass nvc8_graph_sclass[];
+extern struct nouveau_oclass nvf0_graph_sclass[];
struct nvc0_graph_init {
u32 addr;
@@ -149,6 +179,9 @@ struct nvc0_graph_ucode {
extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode;
extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode;
+extern struct nvc0_graph_ucode nvf0_graph_fecs_ucode;
+extern struct nvc0_graph_ucode nvf0_graph_gpccs_ucode;
+
struct nvc0_graph_oclass {
struct nouveau_oclass base;
struct nouveau_oclass **cclass;
@@ -160,6 +193,7 @@ struct nvc0_graph_oclass {
struct {
struct nvc0_graph_ucode *ucode;
} gpccs;
+ int ppc_nr;
};
void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
@@ -223,9 +257,11 @@ extern const struct nvc0_graph_init nve4_graph_init_be_0[];
extern const struct nvc0_graph_pack nve4_graph_pack_mmio[];
extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
+extern const struct nvc0_graph_init nvf0_graph_init_ds_0[];
extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[];
extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[];
+extern const struct nvc0_graph_init nvf0_graph_init_tex_0[];
extern const struct nvc0_graph_init nvf0_graph_init_sm_0[];
extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[];
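
The new ppc_nr/ppc_tpc_nr fields above are filled by the constructor hunk in nvc0.c: each PPC exposes a bitmask of the TPCs routed through it, and hweight8() reduces that mask to a count. A tiny sketch of the equivalent population count, with a hypothetical mask value for illustration:

    #include <stdint.h>

    static uint8_t popcount8(uint8_t mask)	/* what hweight8() computes */
    {
    	uint8_t n = 0;

    	for (; mask; mask >>= 1)
    		n += mask & 1;
    	return n;
    }

    /* e.g. a (hypothetical) mask of 0x0b, i.e. TPCs 0, 1 and 3, gives
     * popcount8(0x0b) == 3 TPCs behind that PPC. */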
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
index 30cab0b2eba1..93d58e5b82c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
@@ -33,9 +33,9 @@ static struct nouveau_oclass
nvc1_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0x9039, &nouveau_object_ofuncs },
- { 0x9097, &nouveau_object_ofuncs },
- { 0x90c0, &nouveau_object_ofuncs },
- { 0x9197, &nouveau_object_ofuncs },
+ { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
index a6bf783e1256..692e1eda0eb4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
@@ -33,10 +33,10 @@ struct nouveau_oclass
nvc8_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0x9039, &nouveau_object_ofuncs },
- { 0x9097, &nouveau_object_ofuncs },
- { 0x90c0, &nouveau_object_ofuncs },
- { 0x9197, &nouveau_object_ofuncs },
- { 0x9297, &nouveau_object_ofuncs },
+ { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
index 2a6a94e2a041..41e8445c7eea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
@@ -133,4 +133,5 @@ nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = nvd7_graph_pack_mmio,
.fecs.ucode = &nvd7_graph_fecs_ucode,
.gpccs.ucode = &nvd7_graph_gpccs_ucode,
+ .ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
index 51e0c075ad34..0c71f5c67ae0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
@@ -22,6 +22,8 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include <subdev/pwr.h>
+
#include "nvc0.h"
#include "ctxnvc0.h"
@@ -33,8 +35,8 @@ static struct nouveau_oclass
nve4_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa040, &nouveau_object_ofuncs },
- { 0xa097, &nouveau_object_ofuncs },
- { 0xa0c0, &nouveau_object_ofuncs },
+ { KEPLER_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -190,39 +192,20 @@ nve4_graph_pack_mmio[] = {
******************************************************************************/
int
-nve4_graph_fini(struct nouveau_object *object, bool suspend)
-{
- struct nvc0_graph_priv *priv = (void *)object;
-
- /*XXX: this is a nasty hack to power on gr on certain boards
- * where it's disabled by therm, somehow. ideally it'd
- * be nice to know when we should be doing this, and why,
- * but, it's yet to be determined. for now we test for
- * the particular mmio error that occurs in the situation,
- * and then bash therm in the way nvidia do.
- */
- nv_mask(priv, 0x000200, 0x08001000, 0x08001000);
- nv_rd32(priv, 0x000200);
- if (nv_rd32(priv, 0x400700) == 0xbadf1000) {
- nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
- nv_rd32(priv, 0x000200);
- nv_mask(priv, 0x020004, 0xc0000000, 0x40000000);
- }
-
- return nouveau_graph_fini(&priv->base, suspend);
-}
-
-int
nve4_graph_init(struct nouveau_object *object)
{
struct nvc0_graph_oclass *oclass = (void *)object->oclass;
struct nvc0_graph_priv *priv = (void *)object;
+ struct nouveau_pwr *ppwr = nouveau_pwr(priv);
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int ret, i;
+ if (ppwr)
+ ppwr->pgob(ppwr, false);
+
ret = nouveau_graph_init(&priv->base);
if (ret)
return ret;
@@ -320,6 +303,9 @@ nve4_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400134, 0xffffffff);
nv_wr32(priv, 0x400054, 0x34ce3464);
+
+ nvc0_graph_zbc_init(priv);
+
return nvc0_graph_init_ctxctl(priv);
}
@@ -350,11 +336,12 @@ nve4_graph_oclass = &(struct nvc0_graph_oclass) {
.ctor = nvc0_graph_ctor,
.dtor = nvc0_graph_dtor,
.init = nve4_graph_init,
- .fini = nve4_graph_fini,
+ .fini = _nouveau_graph_fini,
},
.cclass = &nve4_grctx_oclass,
.sclass = nve4_graph_sclass,
.mmio = nve4_graph_pack_mmio,
.fecs.ucode = &nve4_graph_fecs_ucode,
.gpccs.ucode = &nve4_graph_gpccs_ucode,
+ .ppc_nr = 1,
}.base;
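
One constant worth unpacking in the retained nve4_graph_init() context: magicgpc918 is a ceiling division of 0x00800000 by the total TPC count. A standalone sketch with a hypothetical TPC count; the DIV_ROUND_UP definition here matches the kernel macro's behaviour.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	uint32_t tpc_total = 7;	/* hypothetical TPC count */
    	uint32_t magicgpc918 = DIV_ROUND_UP(0x00800000, tpc_total);

    	/* 0x00800000 / 7 rounded up: prints 0x00124925 */
    	printf("0x%08" PRIX32 "\n", magicgpc918);
    	return 0;
    }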
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index c96762122b9b..c306c0f2fc84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -29,12 +29,12 @@
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
+struct nouveau_oclass
nvf0_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa140, &nouveau_object_ofuncs },
- { 0xa197, &nouveau_object_ofuncs },
- { 0xa1c0, &nouveau_object_ofuncs },
+ { KEPLER_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { KEPLER_COMPUTE_B, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -50,7 +50,7 @@ nvf0_graph_init_fe_0[] = {
{}
};
-static const struct nvc0_graph_init
+const struct nvc0_graph_init
nvf0_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
@@ -88,7 +88,7 @@ nvf0_graph_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
+const struct nvc0_graph_init
nvf0_graph_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
@@ -170,7 +170,7 @@ nvf0_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
+int
nvf0_graph_fini(struct nouveau_object *object, bool suspend)
{
struct nvc0_graph_priv *priv = (void *)object;
@@ -209,7 +209,7 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend)
#include "fuc/hubnvf0.fuc.h"
-static struct nvc0_graph_ucode
+struct nvc0_graph_ucode
nvf0_graph_fecs_ucode = {
.code.data = nvf0_grhub_code,
.code.size = sizeof(nvf0_grhub_code),
@@ -219,7 +219,7 @@ nvf0_graph_fecs_ucode = {
#include "fuc/gpcnvf0.fuc.h"
-static struct nvc0_graph_ucode
+struct nvc0_graph_ucode
nvf0_graph_gpccs_ucode = {
.code.data = nvf0_grgpc_code,
.code.size = sizeof(nvf0_grgpc_code),
@@ -241,4 +241,5 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = nvf0_graph_pack_mmio,
.fecs.ucode = &nvf0_graph_fecs_ucode,
.gpccs.ucode = &nvf0_graph_gpccs_ucode,
+ .ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 7eb6d94c84e2..d88c700b2f69 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/handle.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index d4e7ec0ba68c..bdb2f20ff7b1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
index 3d8c2133e0e8..72c7f33fd29b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 37a2bd9e8078..cae33f86b11a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
index 96f5aa92677b..e9cc8b116a24 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
index e9c5e51943ef..63013812f7c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -22,8 +22,11 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/option.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
#include <subdev/clock.h>
@@ -101,24 +104,28 @@ nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
* Perfmon object classes
******************************************************************************/
static int
-nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_perfctr_query(struct nouveau_object *object, void *data, u32 size)
{
+ union {
+ struct nvif_perfctr_query_v0 v0;
+ } *args = data;
struct nouveau_device *device = nv_device(object);
struct nouveau_perfmon *ppm = (void *)object->engine;
struct nouveau_perfdom *dom = NULL, *chk;
- struct nv_perfctr_query *args = data;
const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
const char *name;
int tmp = 0, di, si;
- char path[64];
-
- if (size < sizeof(*args))
- return -EINVAL;
+ int ret;
- di = (args->iter & 0xff000000) >> 24;
- si = (args->iter & 0x00ffffff) - 1;
+ nv_ioctl(object, "perfctr query size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "perfctr query vers %d iter %08x\n",
+ args->v0.version, args->v0.iter);
+ di = (args->v0.iter & 0xff000000) >> 24;
+ si = (args->v0.iter & 0x00ffffff) - 1;
+ } else
+ return ret;
list_for_each_entry(chk, &ppm->domains, head) {
if (tmp++ == di) {
@@ -132,19 +139,17 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
if (si >= 0) {
if (raw || !(name = dom->signal[si].name)) {
- snprintf(path, sizeof(path), "/%s/%02x", dom->name, si);
- name = path;
+ snprintf(args->v0.name, sizeof(args->v0.name),
+ "/%s/%02x", dom->name, si);
+ } else {
+ strncpy(args->v0.name, name, sizeof(args->v0.name));
}
-
- if (args->name)
- strncpy(args->name, name, args->size);
- args->size = strlen(name) + 1;
}
do {
while (++si < dom->signal_nr) {
if (all || dom->signal[si].name) {
- args->iter = (di << 24) | ++si;
+ args->v0.iter = (di << 24) | ++si;
return 0;
}
}
@@ -153,21 +158,26 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
dom = list_entry(dom->head.next, typeof(*dom), head);
} while (&dom->head != &ppm->domains);
- args->iter = 0xffffffff;
+ args->v0.iter = 0xffffffff;
return 0;
}
static int
-nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size)
{
+ union {
+ struct nvif_perfctr_sample none;
+ } *args = data;
struct nouveau_perfmon *ppm = (void *)object->engine;
struct nouveau_perfctr *ctr, *tmp;
struct nouveau_perfdom *dom;
- struct nv_perfctr_sample *args = data;
+ int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(object, "perfctr sample size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "perfctr sample\n");
+ } else
+ return ret;
ppm->sequence++;
list_for_each_entry(dom, &ppm->domains, head) {
@@ -206,22 +216,45 @@ nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
}
static int
-nouveau_perfctr_read(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_perfctr_read(struct nouveau_object *object, void *data, u32 size)
{
+ union {
+ struct nvif_perfctr_read_v0 v0;
+ } *args = data;
struct nouveau_perfctr *ctr = (void *)object;
- struct nv_perfctr_read *args = data;
+ int ret;
+
+ nv_ioctl(object, "perfctr read size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "perfctr read vers %d\n", args->v0.version);
+ } else
+ return ret;
- if (size < sizeof(*args))
- return -EINVAL;
if (!ctr->clk)
return -EAGAIN;
- args->clk = ctr->clk;
- args->ctr = ctr->ctr;
+ args->v0.clk = ctr->clk;
+ args->v0.ctr = ctr->ctr;
return 0;
}
+static int
+nouveau_perfctr_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NVIF_PERFCTR_V0_QUERY:
+ return nouveau_perfctr_query(object, data, size);
+ case NVIF_PERFCTR_V0_SAMPLE:
+ return nouveau_perfctr_sample(object, data, size);
+ case NVIF_PERFCTR_V0_READ:
+ return nouveau_perfctr_read(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_perfctr_dtor(struct nouveau_object *object)
{
@@ -237,19 +270,27 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nvif_perfctr_v0 v0;
+ } *args = data;
struct nouveau_perfmon *ppm = (void *)engine;
struct nouveau_perfdom *dom = NULL;
struct nouveau_perfsig *sig[4] = {};
struct nouveau_perfctr *ctr;
- struct nv_perfctr_class *args = data;
int ret, i;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create perfctr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n",
+ args->v0.version, args->v0.logic_op);
+ } else
+ return ret;
- for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) {
- sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name,
- args->signal[i].size, &dom);
+ for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
+ sig[i] = nouveau_perfsig_find(ppm, args->v0.name[i],
+ strnlen(args->v0.name[i],
+ sizeof(args->v0.name[i])),
+ &dom);
if (!sig[i])
return -EINVAL;
}
@@ -260,7 +301,7 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
return ret;
ctr->slot = -1;
- ctr->logic_op = args->logic_op;
+ ctr->logic_op = args->v0.logic_op;
ctr->signal[0] = sig[0];
ctr->signal[1] = sig[1];
ctr->signal[2] = sig[2];
@@ -276,21 +317,13 @@ nouveau_perfctr_ofuncs = {
.dtor = nouveau_perfctr_dtor,
.init = nouveau_object_init,
.fini = nouveau_object_fini,
-};
-
-static struct nouveau_omthds
-nouveau_perfctr_omthds[] = {
- { NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
- { NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
- { NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
- {}
+ .mthd = nouveau_perfctr_mthd,
};
struct nouveau_oclass
nouveau_perfmon_sclass[] = {
- { .handle = NV_PERFCTR_CLASS,
+ { .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
.ofuncs = &nouveau_perfctr_ofuncs,
- .omthds = nouveau_perfctr_omthds,
},
{},
};
@@ -303,6 +336,7 @@ nouveau_perfctx_dtor(struct nouveau_object *object)
{
struct nouveau_perfmon *ppm = (void *)object->engine;
mutex_lock(&nv_subdev(ppm)->mutex);
+ nouveau_engctx_destroy(&ppm->context->base);
ppm->context = NULL;
mutex_unlock(&nv_subdev(ppm)->mutex);
}
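
The perfmon conversion above replaces per-method omthds tables with a single ->mthd hook that switches on the method id, each handler unpacking a union of versioned argument structs. A schematic sketch of that shape follows; all names are illustrative stand-ins, and the hand-rolled version check only approximates what the real nvif_unpack() macro does.

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    struct my_query_v0 { uint8_t version; uint32_t iter; };

    enum { MY_MTHD_QUERY = 0, MY_MTHD_SAMPLE = 1 };

    static int my_query(void *data, uint32_t size)
    {
    	union { struct my_query_v0 v0; } *args = data;

    	if (size < sizeof(args->v0) || args->v0.version != 0)
    		return -ENOSYS;	/* unknown layout; caller may retry */
    	args->v0.iter = 0xffffffff;	/* end-of-iteration marker */
    	return 0;
    }

    static int my_mthd(uint32_t mthd, void *data, uint32_t size)
    {
    	switch (mthd) {
    	case MY_MTHD_QUERY:
    		return my_query(data, size);
    	default:
    		return -EINVAL;
    	}
    }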
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index c571758e4a27..64df15c7f051 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a62f11a78430..f54a2253deca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index f3b4d9dbf23c..4d2994d8cc32 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -23,12 +23,12 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/namedb.h>
#include <core/handle.h>
#include <core/gpuobj.h>
#include <core/event.h>
+#include <nvif/event.h>
#include <subdev/bar.h>
@@ -86,10 +86,10 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
u32 head = *(u32 *)args;
- if (head >= chan->vblank.nr_event)
+ if (head >= nouveau_disp(chan)->vblank.index_nr)
return -EINVAL;
- nouveau_event_get(chan->vblank.event[head]);
+ nvkm_notify_get(&chan->vblank.notify[head]);
return 0;
}
@@ -124,9 +124,10 @@ nv50_software_sclass[] = {
******************************************************************************/
static int
-nv50_software_vblsem_release(void *data, u32 type, int head)
+nv50_software_vblsem_release(struct nvkm_notify *notify)
{
- struct nv50_software_chan *chan = data;
+ struct nv50_software_chan *chan =
+ container_of(notify, typeof(*chan), vblank.notify[notify->index]);
struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
struct nouveau_bar *bar = nouveau_bar(priv);
@@ -142,7 +143,7 @@ nv50_software_vblsem_release(void *data, u32 type, int head)
nv_wr32(priv, 0x060014, chan->vblank.value);
}
- return NVKM_EVENT_DROP;
+ return NVKM_NOTIFY_DROP;
}
void
@@ -151,11 +152,8 @@ nv50_software_context_dtor(struct nouveau_object *object)
struct nv50_software_chan *chan = (void *)object;
int i;
- if (chan->vblank.event) {
- for (i = 0; i < chan->vblank.nr_event; i++)
- nouveau_event_ref(NULL, &chan->vblank.event[i]);
- kfree(chan->vblank.event);
- }
+ for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
+ nvkm_notify_fini(&chan->vblank.notify[i]);
nouveau_software_context_destroy(&chan->base);
}
@@ -176,15 +174,14 @@ nv50_software_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
- chan->vblank.event = kzalloc(chan->vblank.nr_event *
- sizeof(*chan->vblank.event), GFP_KERNEL);
- if (!chan->vblank.event)
- return -ENOMEM;
-
- for (i = 0; i < chan->vblank.nr_event; i++) {
- ret = nouveau_event_new(pdisp->vblank, 1, i, pclass->vblank,
- chan, &chan->vblank.event[i]);
+ for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
+ ret = nvkm_notify_init(&pdisp->vblank, pclass->vblank, false,
+ &(struct nvif_notify_head_req_v0) {
+ .head = i,
+ },
+ sizeof(struct nvif_notify_head_req_v0),
+ sizeof(struct nvif_notify_head_rep_v0),
+ &chan->vblank.notify[i]);
if (ret)
return ret;
}
@@ -198,7 +195,7 @@ nv50_software_cclass = {
.base.handle = NV_ENGCTX(SW, 0x50),
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_software_context_ctor,
- .dtor = _nouveau_software_context_dtor,
+ .dtor = nv50_software_context_dtor,
.init = _nouveau_software_context_init,
.fini = _nouveau_software_context_fini,
},
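
The new vblank handlers above recover their channel with container_of() on an indexed array member. A portable sketch of the same pointer arithmetic, using notify->index to step back to element zero first; the structure names are simplified stand-ins.

    #include <stddef.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct notify { int index; };

    struct chan {
    	int id;
    	struct notify vblank_notify[4];
    };

    static struct chan *chan_from_notify(struct notify *notify)
    {
    	/* step back to element 0 of the array, then to the enclosing chan */
    	struct notify *base = notify - notify->index;

    	return container_of(base, struct chan, vblank_notify[0]);
    }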
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
index bb49a7a20857..41542e725b4b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -19,14 +19,13 @@ int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
struct nv50_software_cclass {
struct nouveau_oclass base;
- int (*vblank)(void *, u32, int);
+ int (*vblank)(struct nvkm_notify *);
};
struct nv50_software_chan {
struct nouveau_software_chan base;
struct {
- struct nouveau_eventh **event;
- int nr_event;
+ struct nvkm_notify notify[4];
u32 channel;
u32 ctxdma;
u64 offset;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index 135c20f38356..6af370d3a06d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/event.h>
@@ -104,9 +103,10 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
-nvc0_software_vblsem_release(void *data, u32 type, int head)
+nvc0_software_vblsem_release(struct nvkm_notify *notify)
{
- struct nv50_software_chan *chan = data;
+ struct nv50_software_chan *chan =
+ container_of(notify, typeof(*chan), vblank.notify[notify->index]);
struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
struct nouveau_bar *bar = nouveau_bar(priv);
@@ -116,7 +116,7 @@ nvc0_software_vblsem_release(void *data, u32 type, int head)
nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
nv_wr32(priv, 0x060014, chan->vblank.value);
- return NVKM_EVENT_DROP;
+ return NVKM_NOTIFY_DROP;
}
static struct nv50_software_cclass
@@ -124,7 +124,7 @@ nvc0_software_cclass = {
.base.handle = NV_ENGCTX(SW, 0xc0),
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_software_context_ctor,
- .dtor = _nouveau_software_context_dtor,
+ .dtor = nv50_software_context_dtor,
.init = _nouveau_software_context_init,
.fini = _nouveau_software_context_fini,
},
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
deleted file mode 100644
index e0c812bc884f..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ /dev/null
@@ -1,470 +0,0 @@
-#ifndef __NOUVEAU_CLASS_H__
-#define __NOUVEAU_CLASS_H__
-
-/* Device class
- *
- * 0080: NV_DEVICE
- */
-#define NV_DEVICE_CLASS 0x00000080
-
-#define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL
-#define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL
-#define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL
-#define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL
-#define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL
-#define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL
-#define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL
-#define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL
-#define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL
-#define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL
-#define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL
-#define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL
-#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
-#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
-#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
-#define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL
-#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
-
-struct nv_device_class {
- u64 device; /* device identifier, ~0 for client default */
- u64 disable; /* disable particular subsystems */
- u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
-};
-
-/* DMA object classes
- *
- * 0002: NV_DMA_FROM_MEMORY
- * 0003: NV_DMA_TO_MEMORY
- * 003d: NV_DMA_IN_MEMORY
- */
-#define NV_DMA_FROM_MEMORY_CLASS 0x00000002
-#define NV_DMA_TO_MEMORY_CLASS 0x00000003
-#define NV_DMA_IN_MEMORY_CLASS 0x0000003d
-
-#define NV_DMA_TARGET_MASK 0x000000ff
-#define NV_DMA_TARGET_VM 0x00000000
-#define NV_DMA_TARGET_VRAM 0x00000001
-#define NV_DMA_TARGET_PCI 0x00000002
-#define NV_DMA_TARGET_PCI_US 0x00000003
-#define NV_DMA_TARGET_AGP 0x00000004
-#define NV_DMA_ACCESS_MASK 0x00000f00
-#define NV_DMA_ACCESS_VM 0x00000000
-#define NV_DMA_ACCESS_RD 0x00000100
-#define NV_DMA_ACCESS_WR 0x00000200
-#define NV_DMA_ACCESS_RDWR 0x00000300
-
-/* NV50:NVC0 */
-#define NV50_DMA_CONF0_ENABLE 0x80000000
-#define NV50_DMA_CONF0_PRIV 0x00300000
-#define NV50_DMA_CONF0_PRIV_VM 0x00000000
-#define NV50_DMA_CONF0_PRIV_US 0x00100000
-#define NV50_DMA_CONF0_PRIV__S 0x00200000
-#define NV50_DMA_CONF0_PART 0x00030000
-#define NV50_DMA_CONF0_PART_VM 0x00000000
-#define NV50_DMA_CONF0_PART_256 0x00010000
-#define NV50_DMA_CONF0_PART_1KB 0x00020000
-#define NV50_DMA_CONF0_COMP 0x00000180
-#define NV50_DMA_CONF0_COMP_NONE 0x00000000
-#define NV50_DMA_CONF0_COMP_VM 0x00000180
-#define NV50_DMA_CONF0_TYPE 0x0000007f
-#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
-
-/* NVC0:NVD9 */
-#define NVC0_DMA_CONF0_ENABLE 0x80000000
-#define NVC0_DMA_CONF0_PRIV 0x00300000
-#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
-#define NVC0_DMA_CONF0_PRIV_US 0x00100000
-#define NVC0_DMA_CONF0_PRIV__S 0x00200000
-#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
-#define NVC0_DMA_CONF0_TYPE 0x000000ff
-#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
-
-/* NVD9- */
-#define NVD0_DMA_CONF0_ENABLE 0x80000000
-#define NVD0_DMA_CONF0_PAGE 0x00000400
-#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
-#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
-#define NVD0_DMA_CONF0_TYPE 0x000000ff
-#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
-
-struct nv_dma_class {
- u32 flags;
- u32 pad0;
- u64 start;
- u64 limit;
- u32 conf0;
-};
-
-/* Perfmon counter class
- *
- * XXXX: NV_PERFCTR
- */
-#define NV_PERFCTR_CLASS 0x0000ffff
-#define NV_PERFCTR_QUERY 0x00000000
-#define NV_PERFCTR_SAMPLE 0x00000001
-#define NV_PERFCTR_READ 0x00000002
-
-struct nv_perfctr_class {
- u16 logic_op;
- struct {
- char __user *name; /*XXX: use cfu when exposed to userspace */
- u32 size;
- } signal[4];
-};
-
-struct nv_perfctr_query {
- u32 iter;
- u32 size;
- char __user *name; /*XXX: use ctu when exposed to userspace */
-};
-
-struct nv_perfctr_sample {
-};
-
-struct nv_perfctr_read {
- u32 ctr;
- u32 clk;
-};
-
-/* Device control class
- *
- * XXXX: NV_CONTROL
- */
-#define NV_CONTROL_CLASS 0x0000fffe
-
-#define NV_CONTROL_PSTATE_INFO 0x00000000
-#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1)
-#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2)
-#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1)
-#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2)
-#define NV_CONTROL_PSTATE_ATTR 0x00000001
-#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1)
-#define NV_CONTROL_PSTATE_USER 0x00000002
-#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1)
-#define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2)
-
-struct nv_control_pstate_info {
- u32 count; /* out: number of power states */
- s32 ustate; /* out: current target pstate index */
- u32 pstate; /* out: current pstate index */
-};
-
-struct nv_control_pstate_attr {
- s32 state; /* in: index of pstate to query
- * out: pstate identifier
- */
- u32 index; /* in: index of attribute to query
- * out: index of next attribute, or 0 if no more
- */
- char name[32];
- char unit[16];
- u32 min;
- u32 max;
-};
-
-struct nv_control_pstate_user {
- s32 state; /* in: pstate identifier */
-};
-
-/* DMA FIFO channel classes
- *
- * 006b: NV03_CHANNEL_DMA
- * 006e: NV10_CHANNEL_DMA
- * 176e: NV17_CHANNEL_DMA
- * 406e: NV40_CHANNEL_DMA
- * 506e: NV50_CHANNEL_DMA
- * 826e: NV84_CHANNEL_DMA
- */
-#define NV03_CHANNEL_DMA_CLASS 0x0000006b
-#define NV10_CHANNEL_DMA_CLASS 0x0000006e
-#define NV17_CHANNEL_DMA_CLASS 0x0000176e
-#define NV40_CHANNEL_DMA_CLASS 0x0000406e
-#define NV50_CHANNEL_DMA_CLASS 0x0000506e
-#define NV84_CHANNEL_DMA_CLASS 0x0000826e
-
-struct nv03_channel_dma_class {
- u32 pushbuf;
- u32 pad0;
- u64 offset;
-};
-
-/* Indirect FIFO channel classes
- *
- * 506f: NV50_CHANNEL_IND
- * 826f: NV84_CHANNEL_IND
- * 906f: NVC0_CHANNEL_IND
- * a06f: NVE0_CHANNEL_IND
- */
-
-#define NV50_CHANNEL_IND_CLASS 0x0000506f
-#define NV84_CHANNEL_IND_CLASS 0x0000826f
-#define NVC0_CHANNEL_IND_CLASS 0x0000906f
-#define NVE0_CHANNEL_IND_CLASS 0x0000a06f
-
-struct nv50_channel_ind_class {
- u32 pushbuf;
- u32 ilength;
- u64 ioffset;
-};
-
-#define NVE0_CHANNEL_IND_ENGINE_GR 0x00000001
-#define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002
-#define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004
-#define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008
-#define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010
-#define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020
-#define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040
-
-struct nve0_channel_ind_class {
- u32 pushbuf;
- u32 ilength;
- u64 ioffset;
- u32 engine;
-};
-
-/* 0046: NV04_DISP
- */
-
-#define NV04_DISP_CLASS 0x00000046
-
-#define NV04_DISP_MTHD 0x00000000
-#define NV04_DISP_MTHD_HEAD 0x00000001
-
-#define NV04_DISP_SCANOUTPOS 0x00000000
-
-struct nv04_display_class {
-};
-
-struct nv04_display_scanoutpos {
- s64 time[2];
- u32 vblanks;
- u32 vblanke;
- u32 vtotal;
- u32 vline;
- u32 hblanks;
- u32 hblanke;
- u32 htotal;
- u32 hline;
-};
-
-/* 5070: NV50_DISP
- * 8270: NV84_DISP
- * 8370: NVA0_DISP
- * 8870: NV94_DISP
- * 8570: NVA3_DISP
- * 9070: NVD0_DISP
- * 9170: NVE0_DISP
- * 9270: NVF0_DISP
- * 9470: GM107_DISP
- */
-
-#define NV50_DISP_CLASS 0x00005070
-#define NV84_DISP_CLASS 0x00008270
-#define NVA0_DISP_CLASS 0x00008370
-#define NV94_DISP_CLASS 0x00008870
-#define NVA3_DISP_CLASS 0x00008570
-#define NVD0_DISP_CLASS 0x00009070
-#define NVE0_DISP_CLASS 0x00009170
-#define NVF0_DISP_CLASS 0x00009270
-#define GM107_DISP_CLASS 0x00009470
-
-#define NV50_DISP_MTHD 0x00000000
-#define NV50_DISP_MTHD_HEAD 0x00000003
-
-#define NV50_DISP_SCANOUTPOS 0x00000000
-
-#define NV50_DISP_SOR_MTHD 0x00010000
-#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
-#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
-#define NV50_DISP_SOR_MTHD_LINK 0x00000004
-#define NV50_DISP_SOR_MTHD_OR 0x00000003
-
-#define NV50_DISP_SOR_PWR 0x00010000
-#define NV50_DISP_SOR_PWR_STATE 0x00000001
-#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
-#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
-#define NVA3_DISP_SOR_HDA_ELD 0x00010100
-#define NV84_DISP_SOR_HDMI_PWR 0x00012000
-#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
-#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
-#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
-#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
-#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
-#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
-#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
-#define NV94_DISP_SOR_DP_PWR 0x00016000
-#define NV94_DISP_SOR_DP_PWR_STATE 0x00000001
-#define NV94_DISP_SOR_DP_PWR_STATE_OFF 0x00000000
-#define NV94_DISP_SOR_DP_PWR_STATE_ON 0x00000001
-
-#define NV50_DISP_DAC_MTHD 0x00020000
-#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
-#define NV50_DISP_DAC_MTHD_OR 0x00000003
-
-#define NV50_DISP_DAC_PWR 0x00020000
-#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
-#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
-#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
-#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
-#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
-#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
-#define NV50_DISP_DAC_PWR_DATA 0x00000010
-#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
-#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
-#define NV50_DISP_DAC_PWR_STATE 0x00000040
-#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
-#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
-#define NV50_DISP_DAC_LOAD 0x00020100
-#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
-
-#define NV50_DISP_PIOR_MTHD 0x00030000
-#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000
-#define NV50_DISP_PIOR_MTHD_OR 0x00000003
-
-#define NV50_DISP_PIOR_PWR 0x00030000
-#define NV50_DISP_PIOR_PWR_STATE 0x00000001
-#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001
-#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000
-#define NV50_DISP_PIOR_TMDS_PWR 0x00032000
-#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001
-#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001
-#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000
-#define NV50_DISP_PIOR_DP_PWR 0x00036000
-#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001
-#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001
-#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000
-
-struct nv50_display_class {
-};
-
-/* 507a: NV50_DISP_CURS
- * 827a: NV84_DISP_CURS
- * 837a: NVA0_DISP_CURS
- * 887a: NV94_DISP_CURS
- * 857a: NVA3_DISP_CURS
- * 907a: NVD0_DISP_CURS
- * 917a: NVE0_DISP_CURS
- * 927a: NVF0_DISP_CURS
- * 947a: GM107_DISP_CURS
- */
-
-#define NV50_DISP_CURS_CLASS 0x0000507a
-#define NV84_DISP_CURS_CLASS 0x0000827a
-#define NVA0_DISP_CURS_CLASS 0x0000837a
-#define NV94_DISP_CURS_CLASS 0x0000887a
-#define NVA3_DISP_CURS_CLASS 0x0000857a
-#define NVD0_DISP_CURS_CLASS 0x0000907a
-#define NVE0_DISP_CURS_CLASS 0x0000917a
-#define NVF0_DISP_CURS_CLASS 0x0000927a
-#define GM107_DISP_CURS_CLASS 0x0000947a
-
-struct nv50_display_curs_class {
- u32 head;
-};
-
-/* 507b: NV50_DISP_OIMM
- * 827b: NV84_DISP_OIMM
- * 837b: NVA0_DISP_OIMM
- * 887b: NV94_DISP_OIMM
- * 857b: NVA3_DISP_OIMM
- * 907b: NVD0_DISP_OIMM
- * 917b: NVE0_DISP_OIMM
- * 927b: NVF0_DISP_OIMM
- * 947b: GM107_DISP_OIMM
- */
-
-#define NV50_DISP_OIMM_CLASS 0x0000507b
-#define NV84_DISP_OIMM_CLASS 0x0000827b
-#define NVA0_DISP_OIMM_CLASS 0x0000837b
-#define NV94_DISP_OIMM_CLASS 0x0000887b
-#define NVA3_DISP_OIMM_CLASS 0x0000857b
-#define NVD0_DISP_OIMM_CLASS 0x0000907b
-#define NVE0_DISP_OIMM_CLASS 0x0000917b
-#define NVF0_DISP_OIMM_CLASS 0x0000927b
-#define GM107_DISP_OIMM_CLASS 0x0000947b
-
-struct nv50_display_oimm_class {
- u32 head;
-};
-
-/* 507c: NV50_DISP_SYNC
- * 827c: NV84_DISP_SYNC
- * 837c: NVA0_DISP_SYNC
- * 887c: NV94_DISP_SYNC
- * 857c: NVA3_DISP_SYNC
- * 907c: NVD0_DISP_SYNC
- * 917c: NVE0_DISP_SYNC
- * 927c: NVF0_DISP_SYNC
- * 947c: GM107_DISP_SYNC
- */
-
-#define NV50_DISP_SYNC_CLASS 0x0000507c
-#define NV84_DISP_SYNC_CLASS 0x0000827c
-#define NVA0_DISP_SYNC_CLASS 0x0000837c
-#define NV94_DISP_SYNC_CLASS 0x0000887c
-#define NVA3_DISP_SYNC_CLASS 0x0000857c
-#define NVD0_DISP_SYNC_CLASS 0x0000907c
-#define NVE0_DISP_SYNC_CLASS 0x0000917c
-#define NVF0_DISP_SYNC_CLASS 0x0000927c
-#define GM107_DISP_SYNC_CLASS 0x0000947c
-
-struct nv50_display_sync_class {
- u32 pushbuf;
- u32 head;
-};
-
-/* 507d: NV50_DISP_MAST
- * 827d: NV84_DISP_MAST
- * 837d: NVA0_DISP_MAST
- * 887d: NV94_DISP_MAST
- * 857d: NVA3_DISP_MAST
- * 907d: NVD0_DISP_MAST
- * 917d: NVE0_DISP_MAST
- * 927d: NVF0_DISP_MAST
- * 947d: GM107_DISP_MAST
- */
-
-#define NV50_DISP_MAST_CLASS 0x0000507d
-#define NV84_DISP_MAST_CLASS 0x0000827d
-#define NVA0_DISP_MAST_CLASS 0x0000837d
-#define NV94_DISP_MAST_CLASS 0x0000887d
-#define NVA3_DISP_MAST_CLASS 0x0000857d
-#define NVD0_DISP_MAST_CLASS 0x0000907d
-#define NVE0_DISP_MAST_CLASS 0x0000917d
-#define NVF0_DISP_MAST_CLASS 0x0000927d
-#define GM107_DISP_MAST_CLASS 0x0000947d
-
-struct nv50_display_mast_class {
- u32 pushbuf;
-};
-
-/* 507e: NV50_DISP_OVLY
- * 827e: NV84_DISP_OVLY
- * 837e: NVA0_DISP_OVLY
- * 887e: NV94_DISP_OVLY
- * 857e: NVA3_DISP_OVLY
- * 907e: NVD0_DISP_OVLY
- * 917e: NVE0_DISP_OVLY
- * 927e: NVF0_DISP_OVLY
- * 947e: GM107_DISP_OVLY
- */
-
-#define NV50_DISP_OVLY_CLASS 0x0000507e
-#define NV84_DISP_OVLY_CLASS 0x0000827e
-#define NVA0_DISP_OVLY_CLASS 0x0000837e
-#define NV94_DISP_OVLY_CLASS 0x0000887e
-#define NVA3_DISP_OVLY_CLASS 0x0000857e
-#define NVD0_DISP_OVLY_CLASS 0x0000907e
-#define NVE0_DISP_OVLY_CLASS 0x0000917e
-#define NVF0_DISP_OVLY_CLASS 0x0000927e
-#define GM107_DISP_OVLY_CLASS 0x0000947e
-
-struct nv50_display_ovly_class {
- u32 pushbuf;
- u32 head;
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index c66eac513803..1794a05205d8 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -10,6 +10,11 @@ struct nouveau_client {
char name[32];
u32 debug;
struct nouveau_vm *vm;
+ bool super;
+ void *data;
+
+ int (*ntfy)(const void *, u32, const void *, u32);
+ struct nvkm_client_notify *notify[16];
};
static inline struct nouveau_client *
@@ -43,4 +48,10 @@ int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
const char *nouveau_client_name(void *obj);
+int nvkm_client_notify_new(struct nouveau_client *, struct nvkm_event *,
+ void *data, u32 size);
+int nvkm_client_notify_del(struct nouveau_client *, int index);
+int nvkm_client_notify_get(struct nouveau_client *, int index);
+int nvkm_client_notify_put(struct nouveau_client *, int index);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index a8a9a9cf16cb..8743766454a5 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -4,6 +4,7 @@
#include <core/object.h>
#include <core/subdev.h>
#include <core/engine.h>
+#include <core/event.h>
enum nv_subdev_type {
NVDEV_ENGINE_DEVICE,
@@ -28,7 +29,7 @@ enum nv_subdev_type {
NVDEV_SUBDEV_BUS,
NVDEV_SUBDEV_TIMER,
NVDEV_SUBDEV_FB,
- NVDEV_SUBDEV_LTCG,
+ NVDEV_SUBDEV_LTC,
NVDEV_SUBDEV_IBUS,
NVDEV_SUBDEV_INSTMEM,
NVDEV_SUBDEV_VM,
@@ -69,6 +70,8 @@ struct nouveau_device {
struct platform_device *platformdev;
u64 handle;
+ struct nvkm_event event;
+
const char *cfgopt;
const char *dbgopt;
const char *name;
@@ -84,7 +87,6 @@ struct nouveau_device {
NV_40 = 0x40,
NV_50 = 0x50,
NV_C0 = 0xc0,
- NV_D0 = 0xd0,
NV_E0 = 0xe0,
GM100 = 0x110,
} card_type;
@@ -93,8 +95,14 @@ struct nouveau_device {
struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+
+ struct {
+ struct notifier_block nb;
+ } acpi;
};
+int nouveau_device_list(u64 *name, int size);
+
static inline struct nouveau_device *
nv_device(void *obj)
{
@@ -162,12 +170,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
resource_size_t
nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page);
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
-
int
nv_device_get_irq(struct nouveau_device *device, bool stall);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index ba3f1a76a815..51e55d03330a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -1,47 +1,34 @@
#ifndef __NVKM_EVENT_H__
#define __NVKM_EVENT_H__
-/* return codes from event handlers */
-#define NVKM_EVENT_DROP 0
-#define NVKM_EVENT_KEEP 1
+#include <core/notify.h>
-/* nouveau_eventh.flags bit #s */
-#define NVKM_EVENT_ENABLE 0
-
-struct nouveau_eventh {
- struct nouveau_event *event;
- struct list_head head;
- unsigned long flags;
- u32 types;
- int index;
- int (*func)(void *, u32, int);
- void *priv;
+struct nvkm_event_func {
+ int (*ctor)(void *data, u32 size, struct nvkm_notify *);
+ void (*send)(void *data, u32 size, struct nvkm_notify *);
+ void (*init)(struct nvkm_event *, int type, int index);
+ void (*fini)(struct nvkm_event *, int type, int index);
};
-struct nouveau_event {
- void *priv;
- int (*check)(struct nouveau_event *, u32 type, int index);
- void (*enable)(struct nouveau_event *, int type, int index);
- void (*disable)(struct nouveau_event *, int type, int index);
+struct nvkm_event {
+ const struct nvkm_event_func *func;
int types_nr;
int index_nr;
- spinlock_t list_lock;
- struct list_head *list;
spinlock_t refs_lock;
- int refs[];
+ spinlock_t list_lock;
+ struct list_head list;
+ int *refs;
};
-int nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **);
-void nouveau_event_destroy(struct nouveau_event **);
-void nouveau_event_trigger(struct nouveau_event *, u32 types, int index);
-
-int nouveau_event_new(struct nouveau_event *, u32 types, int index,
- int (*func)(void *, u32, int), void *,
- struct nouveau_eventh **);
-void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
-void nouveau_event_get(struct nouveau_eventh *);
-void nouveau_event_put(struct nouveau_eventh *);
+int nvkm_event_init(const struct nvkm_event_func *func,
+ int types_nr, int index_nr,
+ struct nvkm_event *);
+void nvkm_event_fini(struct nvkm_event *);
+void nvkm_event_get(struct nvkm_event *, u32 types, int index);
+void nvkm_event_put(struct nvkm_event *, u32 types, int index);
+void nvkm_event_send(struct nvkm_event *, u32 types, int index,
+ void *data, u32 size);
#endif
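
The rework above replaces the list-based nouveau_event with a table-driven nvkm_event. Below is a minimal provider-side sketch built only from the declarations in this header; the my_* names and the "one type, four indices" layout are hypothetical:

#include <core/event.h>

static int
my_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
{
	/* no request payload accepted; watch type 1, index 0 */
	if (size != 0)
		return -EINVAL;
	notify->types = 1;
	notify->index = 0;
	notify->size  = 0;
	return 0;
}

static void
my_event_init(struct nvkm_event *event, int type, int index)
{
	/* unmask the hardware interrupt for (type, index) */
}

static void
my_event_fini(struct nvkm_event *event, int type, int index)
{
	/* mask the hardware interrupt for (type, index) */
}

static const struct nvkm_event_func
my_event_func = {
	.ctor = my_event_ctor,
	.init = my_event_init,
	.fini = my_event_fini,
};

static int
my_provider_ctor(struct nvkm_event *event)
{
	/* one event type across four indices (e.g. four heads) */
	return nvkm_event_init(&my_event_func, 1, 4, event);
}
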
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
index 363674cdf8ab..ceb67d770875 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/handle.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -10,6 +10,9 @@ struct nouveau_handle {
u32 name;
u32 priv;
+ u8 route;
+ u64 token;
+
struct nouveau_handle *parent;
struct nouveau_object *object;
};
@@ -20,6 +23,11 @@ void nouveau_handle_destroy(struct nouveau_handle *);
int nouveau_handle_init(struct nouveau_handle *);
int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
+int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle,
+ u16 oclass, void *data, u32 size,
+ struct nouveau_object **);
+int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle);
+
struct nouveau_object *
nouveau_handle_ref(struct nouveau_object *, u32 name);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ioctl.h b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
new file mode 100644
index 000000000000..ac7935c2474e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
@@ -0,0 +1,6 @@
+#ifndef __NVKM_IOCTL_H__
+#define __NVKM_IOCTL_H__
+
+int nvkm_ioctl(struct nouveau_client *, bool, void *, u32, void **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/notify.h b/drivers/gpu/drm/nouveau/core/include/core/notify.h
new file mode 100644
index 000000000000..1262d8f020f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/notify.h
@@ -0,0 +1,36 @@
+#ifndef __NVKM_NOTIFY_H__
+#define __NVKM_NOTIFY_H__
+
+struct nvkm_notify {
+ struct nvkm_event *event;
+ struct list_head head;
+#define NVKM_NOTIFY_USER 0
+#define NVKM_NOTIFY_WORK 1
+ unsigned long flags;
+ int block;
+#define NVKM_NOTIFY_DROP 0
+#define NVKM_NOTIFY_KEEP 1
+ int (*func)(struct nvkm_notify *);
+
+ /* set by nvkm_event ctor */
+ u32 types;
+ int index;
+ u32 size;
+
+ struct work_struct work;
+ /* this is const for a *very* good reason - the data might be on the
+ * stack from an irq handler. if you're not core/notify.c then you
+ * should probably think twice before casting it away...
+ */
+ const void *data;
+};
+
+int nvkm_notify_init(struct nvkm_event *, int (*func)(struct nvkm_notify *),
+ bool work, void *data, u32 size, u32 reply,
+ struct nvkm_notify *);
+void nvkm_notify_fini(struct nvkm_notify *);
+void nvkm_notify_get(struct nvkm_notify *);
+void nvkm_notify_put(struct nvkm_notify *);
+void nvkm_notify_send(struct nvkm_notify *, void *data, u32 size);
+
+#endif
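
And the consumer side of the new interface, as a hedged sketch (my_* names hypothetical): the handler runs once per delivered event, from a workqueue here since `work' is true, and its return value decides whether the notifier stays armed:

#include <core/event.h>	/* pulls in core/notify.h */

struct my_consumer {
	struct nvkm_notify notify;
};

static int
my_notify_handler(struct nvkm_notify *notify)
{
	struct my_consumer *cons =
		container_of(notify, typeof(*cons), notify);

	/* notify->data / notify->size carry the sender's payload */
	(void)cons; /* a real handler would act on its owner here */
	return NVKM_NOTIFY_KEEP;	/* stay armed for the next event */
}

static int
my_consumer_ctor(struct my_consumer *cons, struct nvkm_event *event)
{
	/* NULL/0 request, no reply payload, deliver via workqueue */
	int ret = nvkm_notify_init(event, my_notify_handler, true,
				   NULL, 0, 0, &cons->notify);
	if (ret == 0)
		nvkm_notify_get(&cons->notify);
	return ret;
}
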
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 62e68baef087..d7039482d6fd 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -48,6 +48,10 @@ void nouveau_object_destroy(struct nouveau_object *);
int nouveau_object_init(struct nouveau_object *);
int nouveau_object_fini(struct nouveau_object *, bool suspend);
+int _nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
extern struct nouveau_ofuncs nouveau_object_ofuncs;
/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
@@ -78,6 +82,7 @@ struct nouveau_omthds {
int (*call)(struct nouveau_object *, u32, void *, u32);
};
+struct nvkm_event;
struct nouveau_ofuncs {
int (*ctor)(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *data, u32 size,
@@ -85,6 +90,9 @@ struct nouveau_ofuncs {
void (*dtor)(struct nouveau_object *);
int (*init)(struct nouveau_object *);
int (*fini)(struct nouveau_object *, bool suspend);
+ int (*mthd)(struct nouveau_object *, u32, void *, u32);
+ int (*ntfy)(struct nouveau_object *, u32, struct nvkm_event **);
+ int (* map)(struct nouveau_object *, u64 *, u32 *);
u8 (*rd08)(struct nouveau_object *, u64 offset);
u16 (*rd16)(struct nouveau_object *, u64 offset);
u32 (*rd32)(struct nouveau_object *, u64 offset);
@@ -106,10 +114,6 @@ void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
int nouveau_object_inc(struct nouveau_object *);
int nouveau_object_dec(struct nouveau_object *, bool suspend);
-int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle,
- u16 oclass, void *data, u32 size,
- struct nouveau_object **);
-int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
void nouveau_object_debug(void);
static inline int
@@ -199,4 +203,21 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
return 0;
}
+#include <core/handle.h>
+
+static inline int
+nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle,
+ u16 oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ return nouveau_handle_new(client, parent, handle, oclass,
+ data, size, pobject);
+}
+
+static inline int
+nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle)
+{
+ return nouveau_handle_del(client, parent, handle);
+}
+
#endif
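
The new ofuncs hooks above give objects optional mthd/ntfy/map entry points. A hedged sketch of a map() implementation; the BAR index, window offset and size are invented values:

static int
my_object_map(struct nouveau_object *object, u64 *addr, u32 *size)
{
	struct nouveau_device *device = nv_device(object);

	/* expose a hypothetical 4KiB register window inside BAR0 */
	*addr = nv_device_resource_start(device, 0) + 0x800000;
	*size = 0x1000;
	return 0;
}
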
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 9f5ea900ff00..12da418ec70a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -57,5 +57,6 @@ void _nouveau_parent_dtor(struct nouveau_object *);
int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
struct nouveau_object **pengine,
struct nouveau_oclass **poclass);
+int nouveau_parent_lclass(struct nouveau_object *, u32 *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index 0f9a37bd32b0..451b6ed20b7e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -21,6 +21,7 @@ nv_printk_(struct nouveau_object *, int, const char *, ...);
#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
+#define nv_ioctl(o,f,a...) nv_trace(nouveau_client(o), "ioctl: "f, ##a)
#define nv_assert(f,a...) do { \
if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index fde842896806..7a64f347b385 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -6,20 +6,13 @@
#include <core/device.h>
#include <core/event.h>
-enum nvkm_hpd_event {
- NVKM_HPD_PLUG = 1,
- NVKM_HPD_UNPLUG = 2,
- NVKM_HPD_IRQ = 4,
- NVKM_HPD = (NVKM_HPD_PLUG | NVKM_HPD_UNPLUG | NVKM_HPD_IRQ)
-};
-
struct nouveau_disp {
struct nouveau_engine base;
struct list_head outp;
- struct nouveau_event *hpd;
- struct nouveau_event *vblank;
+ struct nvkm_event hpd;
+ struct nvkm_event vblank;
};
static inline struct nouveau_disp *
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index b28914ed1752..1b283a7b78e6 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,37 +12,20 @@ struct nouveau_dmaobj {
u32 access;
u64 start;
u64 limit;
- u32 conf0;
};
struct nouveau_dmaeng {
struct nouveau_engine base;
/* creates a "physical" dma object from a struct nouveau_dmaobj */
- int (*bind)(struct nouveau_dmaeng *dmaeng,
+ int (*bind)(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **);
};
-#define nouveau_dmaeng_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
-#define nouveau_dmaeng_destroy(p) \
- nouveau_engine_destroy(&(p)->base)
-#define nouveau_dmaeng_init(p) \
- nouveau_engine_init(&(p)->base)
-#define nouveau_dmaeng_fini(p,s) \
- nouveau_engine_fini(&(p)->base, (s))
-
-#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
-#define _nouveau_dmaeng_init _nouveau_engine_init
-#define _nouveau_dmaeng_fini _nouveau_engine_fini
-
-extern struct nouveau_oclass nv04_dmaeng_oclass;
-extern struct nouveau_oclass nv50_dmaeng_oclass;
-extern struct nouveau_oclass nvc0_dmaeng_oclass;
-extern struct nouveau_oclass nvd0_dmaeng_oclass;
-
-extern struct nouveau_oclass nouveau_dmaobj_sclass[];
+extern struct nouveau_oclass *nv04_dmaeng_oclass;
+extern struct nouveau_oclass *nv50_dmaeng_oclass;
+extern struct nouveau_oclass *nvc0_dmaeng_oclass;
+extern struct nouveau_oclass *nvd0_dmaeng_oclass;
#endif
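
Note the reordered bind() prototype above: the dmaobj being bound is now the first argument instead of the engine. A hedged caller sketch (my_* hypothetical):

static int
my_bind_pushbuf(struct nouveau_dmaeng *dmaeng, struct nouveau_dmaobj *dmaobj,
		struct nouveau_object *parent, struct nouveau_gpuobj **pgpuobj)
{
	return dmaeng->bind(dmaobj, parent, pgpuobj);
}
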
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index b639eb2c74ff..e5e4d930b2c2 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -4,12 +4,14 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engine.h>
+#include <core/event.h>
struct nouveau_fifo_chan {
struct nouveau_namedb base;
struct nouveau_dmaobj *pushdma;
struct nouveau_gpuobj *pushgpu;
void __iomem *user;
+ u64 addr;
u32 size;
u16 chid;
atomic_t refcnt; /* NV04_NVSW_SET_REF */
@@ -40,8 +42,10 @@ void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
void _nouveau_fifo_channel_dtor(struct nouveau_object *);
+int _nouveau_fifo_channel_map(struct nouveau_object *, u64 *, u32 *);
u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
+int _nouveau_fifo_channel_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
struct nouveau_fifo_base {
struct nouveau_gpuobj base;
@@ -65,8 +69,8 @@ struct nouveau_fifo_base {
struct nouveau_fifo {
struct nouveau_engine base;
- struct nouveau_event *cevent; /* channel creation event */
- struct nouveau_event *uevent; /* async user trigger */
+ struct nvkm_event cevent; /* channel creation event */
+ struct nvkm_event uevent; /* async user trigger */
struct nouveau_object **channel;
spinlock_t lock;
@@ -112,6 +116,9 @@ extern struct nouveau_oclass *nve0_fifo_oclass;
extern struct nouveau_oclass *gk20a_fifo_oclass;
extern struct nouveau_oclass *nv108_fifo_oclass;
+int nouveau_fifo_uevent_ctor(void *, u32, struct nvkm_notify *);
+void nouveau_fifo_uevent(struct nouveau_fifo *);
+
void nv04_fifo_intr(struct nouveau_subdev *);
int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 8c1d4772da0c..d5055570d01b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -70,6 +70,7 @@ extern struct nouveau_oclass *nvd9_graph_oclass;
extern struct nouveau_oclass *nve4_graph_oclass;
extern struct nouveau_oclass *gk20a_graph_oclass;
extern struct nouveau_oclass *nvf0_graph_oclass;
+extern struct nouveau_oclass *gk110b_graph_oclass;
extern struct nouveau_oclass *nv108_graph_oclass;
extern struct nouveau_oclass *gm107_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
index 49b0024910fe..88cc812baaa3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -4,7 +4,6 @@
#include <core/device.h>
#include <core/engine.h>
#include <core/engctx.h>
-#include <core/class.h>
struct nouveau_perfdom;
struct nouveau_perfctr;
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/class.h b/drivers/gpu/drm/nouveau/core/include/nvif/class.h
new file mode 120000
index 000000000000..f1ac4859edd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/class.h
@@ -0,0 +1 @@
+../../../nvif/class.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/event.h b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
new file mode 120000
index 000000000000..1b798538a725
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
@@ -0,0 +1 @@
+../../../nvif/event.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
new file mode 120000
index 000000000000..8569c86907c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
@@ -0,0 +1 @@
+../../../nvif/ioctl.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
new file mode 120000
index 000000000000..69d99292bca4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
@@ -0,0 +1 @@
+../../../nvif/unpack.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
index 9faa98e67ad8..be037fac534c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -20,6 +20,9 @@ struct nouveau_bar {
u32 flags, struct nouveau_vma *);
void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
void (*flush)(struct nouveau_bar *);
+
+ /* whether the BAR supports being ioremapped write-combined, or must be mapped uncached */
+ bool iomap_uncached;
};
static inline struct nouveau_bar *
@@ -30,5 +33,6 @@ nouveau_bar(void *obj)
extern struct nouveau_oclass nv50_bar_oclass;
extern struct nouveau_oclass nvc0_bar_oclass;
+extern struct nouveau_oclass gk20a_bar_oclass;
#endif
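
The new iomap_uncached flag lets chips such as gk20a opt out of write-combined BAR mappings. A hedged consumer sketch using the standard ioremap()/ioremap_wc() kernel APIs; my_map_bar is hypothetical:

#include <linux/io.h>

static void __iomem *
my_map_bar(struct nouveau_bar *bar, resource_size_t base, resource_size_t len)
{
	if (bar->iomap_uncached)
		return ioremap(base, len);
	return ioremap_wc(base, len);	/* write-combined fast path */
}
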
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index c01e29c9f89a..a5ca00dd2f61 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -71,8 +71,15 @@ struct nouveau_clock {
struct list_head states;
int state_nr;
+ struct work_struct work;
+ wait_queue_head_t wait;
+ atomic_t waiting;
+
+ struct nvkm_notify pwrsrc_ntfy;
+ int pwrsrc;
int pstate; /* current */
- int ustate; /* user-requested (-1 disabled, -2 perfmon) */
+ int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
+ int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
int astate; /* perfmon adjustment (base) */
int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
@@ -108,8 +115,9 @@ struct nouveau_clocks {
int mdiv;
};
-#define nouveau_clock_create(p,e,o,i,r,d) \
- nouveau_clock_create_((p), (e), (o), (i), (r), sizeof(**d), (void **)d)
+#define nouveau_clock_create(p,e,o,i,r,s,n,d) \
+ nouveau_clock_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
+ (void **)d)
#define nouveau_clock_destroy(p) ({ \
struct nouveau_clock *clk = (p); \
_nouveau_clock_dtor(nv_object(clk)); \
@@ -118,15 +126,18 @@ struct nouveau_clocks {
struct nouveau_clock *clk = (p); \
_nouveau_clock_init(nv_object(clk)); \
})
-#define nouveau_clock_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
+#define nouveau_clock_fini(p,s) ({ \
+ struct nouveau_clock *clk = (p); \
+ _nouveau_clock_fini(nv_object(clk), (s)); \
+})
int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *,
- struct nouveau_clocks *, bool, int, void **);
+ struct nouveau_clocks *, struct nouveau_pstate *,
+ int, bool, int, void **);
void _nouveau_clock_dtor(struct nouveau_object *);
-int _nouveau_clock_init(struct nouveau_object *);
-#define _nouveau_clock_fini _nouveau_subdev_fini
+int _nouveau_clock_init(struct nouveau_object *);
+int _nouveau_clock_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv04_clock_oclass;
extern struct nouveau_oclass nv40_clock_oclass;
@@ -136,6 +147,7 @@ extern struct nouveau_oclass *nvaa_clock_oclass;
extern struct nouveau_oclass nva3_clock_oclass;
extern struct nouveau_oclass nvc0_clock_oclass;
extern struct nouveau_oclass nve0_clock_oclass;
+extern struct nouveau_oclass gk20a_clock_oclass;
int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -145,7 +157,7 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
int clk, struct nouveau_pll_vals *);
-int nouveau_clock_ustate(struct nouveau_clock *, int req);
+int nouveau_clock_ustate(struct nouveau_clock *, int req, int pwr);
int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
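
With the user clock state split into separate AC and battery values, callers now pass the power source explicitly. A hedged sketch (my_* hypothetical); per the comments above, a req of -1 disables reclocking and -2 defers to perfmon:

static int
my_set_ustate(struct nouveau_clock *clk, int req, bool on_ac)
{
	return nouveau_clock_ustate(clk, req, on_ac ? 1 : 0);
}
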
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 612d82ab683d..b73733d21cc7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -8,16 +8,22 @@
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
-enum nvkm_gpio_event {
- NVKM_GPIO_HI = 1,
- NVKM_GPIO_LO = 2,
- NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO),
+struct nvkm_gpio_ntfy_req {
+#define NVKM_GPIO_HI 0x01
+#define NVKM_GPIO_LO 0x02
+#define NVKM_GPIO_TOGGLED 0x03
+ u8 mask;
+ u8 line;
+};
+
+struct nvkm_gpio_ntfy_rep {
+ u8 mask;
};
struct nouveau_gpio {
struct nouveau_subdev base;
- struct nouveau_event *events;
+ struct nvkm_event event;
void (*reset)(struct nouveau_gpio *, u8 func);
int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
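
The GPIO event payload is now a structured request/reply pair rather than an enum. A hedged sketch registering a line watcher through the generic notify interface (my_* hypothetical):

static int
my_watch_line(struct nouveau_gpio *gpio, u8 line,
	      int (*func)(struct nvkm_notify *), struct nvkm_notify *notify)
{
	struct nvkm_gpio_ntfy_req req = {
		.mask = NVKM_GPIO_TOGGLED,	/* either edge */
		.line = line,
	};

	return nvkm_notify_init(&gpio->event, func, true, &req, sizeof(req),
				sizeof(struct nvkm_gpio_ntfy_rep), notify);
}
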
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 825f7bb46b67..1b937c2c25ae 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -14,15 +14,18 @@
#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
-enum nvkm_i2c_event {
- NVKM_I2C_PLUG = 1,
- NVKM_I2C_UNPLUG = 2,
- NVKM_I2C_IRQ = 4,
- NVKM_I2C_DONE = 8,
- NVKM_I2C_ANY = (NVKM_I2C_PLUG |
- NVKM_I2C_UNPLUG |
- NVKM_I2C_IRQ |
- NVKM_I2C_DONE),
+struct nvkm_i2c_ntfy_req {
+#define NVKM_I2C_PLUG 0x01
+#define NVKM_I2C_UNPLUG 0x02
+#define NVKM_I2C_IRQ 0x04
+#define NVKM_I2C_DONE 0x08
+#define NVKM_I2C_ANY 0x0f
+ u8 mask;
+ u8 port;
+};
+
+struct nvkm_i2c_ntfy_rep {
+ u8 mask;
};
struct nouveau_i2c_port {
@@ -56,7 +59,7 @@ struct nouveau_i2c_board_info {
struct nouveau_i2c {
struct nouveau_subdev base;
- struct nouveau_event *ntfy;
+ struct nvkm_event event;
struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
new file mode 100644
index 000000000000..b909a7363f6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
@@ -0,0 +1,35 @@
+#ifndef __NOUVEAU_LTC_H__
+#define __NOUVEAU_LTC_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#define NOUVEAU_LTC_MAX_ZBC_CNT 16
+
+struct nouveau_mm_node;
+
+struct nouveau_ltc {
+ struct nouveau_subdev base;
+
+ int (*tags_alloc)(struct nouveau_ltc *, u32 count,
+ struct nouveau_mm_node **);
+ void (*tags_free)(struct nouveau_ltc *, struct nouveau_mm_node **);
+ void (*tags_clear)(struct nouveau_ltc *, u32 first, u32 count);
+
+ int zbc_min;
+ int zbc_max;
+ int (*zbc_color_get)(struct nouveau_ltc *, int index, const u32[4]);
+ int (*zbc_depth_get)(struct nouveau_ltc *, int index, const u32);
+};
+
+static inline struct nouveau_ltc *
+nouveau_ltc(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTC];
+}
+
+extern struct nouveau_oclass *gf100_ltc_oclass;
+extern struct nouveau_oclass *gk104_ltc_oclass;
+extern struct nouveau_oclass *gm107_ltc_oclass;
+
+#endif
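
The LTC subdev replaces LTCG and adds ZBC limits and hooks. A hedged sketch of the comptag interface (my_* hypothetical; assumes the allocated nouveau_mm_node exposes its start as ->offset, as core/mm.h defines it elsewhere in the driver):

static int
my_alloc_comptags(struct nouveau_ltc *ltc, u32 count,
		  struct nouveau_mm_node **pnode)
{
	int ret = ltc->tags_alloc(ltc, count, pnode);
	if (ret == 0)	/* start from a known-clear state */
		ltc->tags_clear(ltc, (*pnode)->offset, count);
	return ret;
}
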
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
deleted file mode 100644
index c9c1950b7743..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __NOUVEAU_LTCG_H__
-#define __NOUVEAU_LTCG_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_mm_node;
-
-struct nouveau_ltcg {
- struct nouveau_subdev base;
-
- int (*tags_alloc)(struct nouveau_ltcg *, u32 count,
- struct nouveau_mm_node **);
- void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
- void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
-};
-
-static inline struct nouveau_ltcg *
-nouveau_ltcg(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG];
-}
-
-#define nouveau_ltcg_create(p,e,o,d) \
- nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \
- sizeof(**d), (void **)d)
-#define nouveau_ltcg_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_ltcg_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_ltcg_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_ltcg_dtor _nouveau_subdev_dtor
-#define _nouveau_ltcg_init _nouveau_subdev_init
-#define _nouveau_ltcg_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass *gf100_ltcg_oclass;
-extern struct nouveau_oclass *gm107_ltcg_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index 72b176831be6..568e4dfc5e9e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -4,15 +4,11 @@
#include <core/subdev.h>
#include <core/device.h>
-struct nouveau_mc_intr {
- u32 stat;
- u32 unit;
-};
-
struct nouveau_mc {
struct nouveau_subdev base;
bool use_msi;
unsigned int irq;
+ void (*unk260)(struct nouveau_mc *, u32);
};
static inline struct nouveau_mc *
@@ -21,30 +17,6 @@ nouveau_mc(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
}
-#define nouveau_mc_create(p,e,o,d) \
- nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_mc_destroy(p) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
-})
-#define nouveau_mc_init(p) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
-})
-#define nouveau_mc_fini(p,s) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
-})
-
-int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_mc_dtor(struct nouveau_object *);
-int _nouveau_mc_init(struct nouveau_object *);
-int _nouveau_mc_fini(struct nouveau_object *, bool);
-
-struct nouveau_mc_oclass {
- struct nouveau_oclass base;
- const struct nouveau_mc_intr *intr;
- void (*msi_rearm)(struct nouveau_mc *);
-};
-
extern struct nouveau_oclass *nv04_mc_oclass;
extern struct nouveau_oclass *nv40_mc_oclass;
extern struct nouveau_oclass *nv44_mc_oclass;
@@ -54,5 +26,6 @@ extern struct nouveau_oclass *nv94_mc_oclass;
extern struct nouveau_oclass *nv98_mc_oclass;
extern struct nouveau_oclass *nvc0_mc_oclass;
extern struct nouveau_oclass *nvc3_mc_oclass;
+extern struct nouveau_oclass *gk20a_mc_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
index c5c92cbed33f..f73feec151db 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -8,18 +8,6 @@ struct nouveau_pwr {
struct nouveau_subdev base;
struct {
- u32 limit;
- u32 *data;
- u32 size;
- } code;
-
- struct {
- u32 limit;
- u32 *data;
- u32 size;
- } data;
-
- struct {
u32 base;
u32 size;
} send;
@@ -35,7 +23,8 @@ struct nouveau_pwr {
u32 data[2];
} recv;
- int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
+ int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
+ void (*pgob)(struct nouveau_pwr *, bool);
};
static inline struct nouveau_pwr *
@@ -44,29 +33,11 @@ nouveau_pwr(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
}
-#define nouveau_pwr_create(p, e, o, d) \
- nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_pwr_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_pwr_init(p) ({ \
- struct nouveau_pwr *ppwr = (p); \
- _nouveau_pwr_init(nv_object(ppwr)); \
-})
-#define nouveau_pwr_fini(p,s) ({ \
- struct nouveau_pwr *ppwr = (p); \
- _nouveau_pwr_fini(nv_object(ppwr), (s)); \
-})
-
-int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-#define _nouveau_pwr_dtor _nouveau_subdev_dtor
-int _nouveau_pwr_init(struct nouveau_object *);
-int _nouveau_pwr_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nva3_pwr_oclass;
-extern struct nouveau_oclass nvc0_pwr_oclass;
-extern struct nouveau_oclass nvd0_pwr_oclass;
-extern struct nouveau_oclass nv108_pwr_oclass;
+extern struct nouveau_oclass *nva3_pwr_oclass;
+extern struct nouveau_oclass *nvc0_pwr_oclass;
+extern struct nouveau_oclass *nvd0_pwr_oclass;
+extern struct nouveau_oclass *gk104_pwr_oclass;
+extern struct nouveau_oclass *nv108_pwr_oclass;
/* interface to MEMX process running on PPWR */
struct nouveau_memx;
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index d0ced94ca54c..ccfa21d72ddc 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -21,6 +21,8 @@
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/clk.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index 73b1ed20c8d5..8bcbdf39cfb2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -99,8 +99,13 @@ nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
struct nouveau_mem *mem, struct nouveau_object **pobject)
{
struct nouveau_object *engine = nv_object(bar);
- return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
- mem, 0, pobject);
+ int ret = -ENOMEM;
+ if (bar->iomem) {
+ ret = nouveau_object_ctor(parent, engine,
+ &nouveau_barobj_oclass,
+ mem, 0, pobject);
+ }
+ return ret;
}
int
@@ -118,9 +123,12 @@ nouveau_bar_create_(struct nouveau_object *parent,
if (ret)
return ret;
- if (nv_device_resource_len(device, 3) != 0)
+ if (nv_device_resource_len(device, 3) != 0) {
bar->iomem = ioremap(nv_device_resource_start(device, 3),
nv_device_resource_len(device, 3));
+ if (!bar->iomem)
+ nv_warn(bar, "PRAMIN ioremap failed\n");
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c
new file mode 100644
index 000000000000..bf877af9d3bd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <subdev/bar.h>
+
+#include "priv.h"
+
+int
+gk20a_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar;
+ int ret;
+
+ ret = nvc0_bar_ctor(parent, engine, oclass, data, size, pobject);
+ if (ret)
+ return ret;
+
+ bar = (struct nouveau_bar *)*pobject;
+ bar->iomap_uncached = true;
+
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_bar_oclass = {
+ .handle = NV_SUBDEV(BAR, 0xea),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_bar_ctor,
+ .dtor = nvc0_bar_dtor,
+ .init = nvc0_bar_init,
+ .fini = _nouveau_bar_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index ca8139b9ab27..0a44459844e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -133,7 +133,7 @@ nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm,
return 0;
}
-static int
+int
nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -169,7 +169,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static void
+void
nvc0_bar_dtor(struct nouveau_object *object)
{
struct nvc0_bar_priv *priv = (void *)object;
@@ -188,7 +188,7 @@ nvc0_bar_dtor(struct nouveau_object *object)
nouveau_bar_destroy(&priv->base);
}
-static int
+int
nvc0_bar_init(struct nouveau_object *object)
{
struct nvc0_bar_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
index ffad8f337ead..3ee8b1476d00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
@@ -23,4 +23,10 @@ int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
void nv84_bar_flush(struct nouveau_bar *);
+int nvc0_bar_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nvc0_bar_dtor(struct nouveau_object *);
+int nvc0_bar_init(struct nouveau_object *);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index 22351f594d2a..a276a711294a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -90,16 +90,20 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
cstate = &pstate->base;
}
- ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
- if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise fan speed: %d\n", ret);
- return ret;
+ if (ptherm) {
+ ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
+ if (ret && ret != -ENODEV) {
+ nv_error(clk, "failed to raise fan speed: %d\n", ret);
+ return ret;
+ }
}
- ret = volt->set_id(volt, cstate->voltage, +1);
- if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise voltage: %d\n", ret);
- return ret;
+ if (volt) {
+ ret = volt->set_id(volt, cstate->voltage, +1);
+ if (ret && ret != -ENODEV) {
+ nv_error(clk, "failed to raise voltage: %d\n", ret);
+ return ret;
+ }
}
ret = clk->calc(clk, cstate);
@@ -108,13 +112,17 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
clk->tidy(clk);
}
- ret = volt->set_id(volt, cstate->voltage, -1);
- if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower voltage: %d\n", ret);
+ if (volt) {
+ ret = volt->set_id(volt, cstate->voltage, -1);
+ if (ret && ret != -ENODEV)
+ nv_error(clk, "failed to lower voltage: %d\n", ret);
+ }
- ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
- if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower fan speed: %d\n", ret);
+ if (ptherm) {
+ ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
+ if (ret && ret != -ENODEV)
+ nv_error(clk, "failed to lower fan speed: %d\n", ret);
+ }
return 0;
}
@@ -194,16 +202,23 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
return nouveau_cstate_prog(clk, pstate, 0);
}
-static int
-nouveau_pstate_calc(struct nouveau_clock *clk)
+static void
+nouveau_pstate_work(struct work_struct *work)
{
- int pstate, ret = 0;
+ struct nouveau_clock *clk = container_of(work, typeof(*clk), work);
+ int pstate;
- nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate,
- clk->ustate, clk->astate, clk->tstate, clk->dstate);
+ if (!atomic_xchg(&clk->waiting, 0))
+ return;
+ clk->pwrsrc = power_supply_is_system_supplied();
- if (clk->state_nr && clk->ustate != -1) {
- pstate = (clk->ustate < 0) ? clk->astate : clk->ustate;
+ nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
+ clk->astate, clk->tstate, clk->dstate);
+
+ pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
+ if (clk->state_nr && pstate != -1) {
+ pstate = (pstate < 0) ? clk->astate : pstate;
pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
pstate = max(pstate, clk->dstate);
} else {
@@ -211,9 +226,26 @@ nouveau_pstate_calc(struct nouveau_clock *clk)
}
nv_trace(clk, "-> %d\n", pstate);
- if (pstate != clk->pstate)
- ret = nouveau_pstate_prog(clk, pstate);
- return ret;
+ if (pstate != clk->pstate) {
+ int ret = nouveau_pstate_prog(clk, pstate);
+ if (ret) {
+ nv_error(clk, "error setting pstate %d: %d\n",
+ pstate, ret);
+ }
+ }
+
+ wake_up_all(&clk->wait);
+ nvkm_notify_get(&clk->pwrsrc_ntfy);
+}
+
+static int
+nouveau_pstate_calc(struct nouveau_clock *clk, bool wait)
+{
+ atomic_set(&clk->waiting, 1);
+ schedule_work(&clk->work);
+ if (wait)
+ wait_event(clk->wait, !atomic_read(&clk->waiting));
+ return 0;
}
static void
@@ -361,17 +393,40 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
req = i;
}
- clk->ustate = req;
- return 0;
+ return req + 2;
+}
+
+static int
+nouveau_clock_nstate(struct nouveau_clock *clk, const char *mode, int arglen)
+{
+ int ret = 1;
+
+ if (strncasecmpz(mode, "disabled", arglen)) {
+ char save = mode[arglen];
+ long v;
+
+ ((char *)mode)[arglen] = '\0';
+ if (!kstrtol(mode, 0, &v)) {
+ ret = nouveau_clock_ustate_update(clk, v);
+ if (ret < 0)
+ ret = 1;
+ }
+ ((char *)mode)[arglen] = save;
+ }
+
+ return ret - 2;
}
int
-nouveau_clock_ustate(struct nouveau_clock *clk, int req)
+nouveau_clock_ustate(struct nouveau_clock *clk, int req, int pwr)
{
int ret = nouveau_clock_ustate_update(clk, req);
- if (ret)
- return ret;
- return nouveau_pstate_calc(clk);
+ if (ret >= 0) {
+ if (ret -= 2, pwr) clk->ustate_ac = ret;
+ else clk->ustate_dc = ret;
+ return nouveau_pstate_calc(clk, true);
+ }
+ return ret;
}
int
@@ -381,7 +436,7 @@ nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
if ( rel) clk->astate += rel;
clk->astate = min(clk->astate, clk->state_nr - 1);
clk->astate = max(clk->astate, 0);
- return nouveau_pstate_calc(clk);
+ return nouveau_pstate_calc(clk, true);
}
int
@@ -391,7 +446,7 @@ nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
if ( rel) clk->tstate += rel;
clk->tstate = min(clk->tstate, 0);
clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nouveau_pstate_calc(clk);
+ return nouveau_pstate_calc(clk, true);
}
int
@@ -401,12 +456,30 @@ nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
if ( rel) clk->dstate += rel;
clk->dstate = min(clk->dstate, clk->state_nr - 1);
clk->dstate = max(clk->dstate, 0);
- return nouveau_pstate_calc(clk);
+ return nouveau_pstate_calc(clk, true);
+}
+
+static int
+nouveau_clock_pwrsrc(struct nvkm_notify *notify)
+{
+ struct nouveau_clock *clk =
+ container_of(notify, typeof(*clk), pwrsrc_ntfy);
+ nouveau_pstate_calc(clk, false);
+ return NVKM_NOTIFY_DROP;
}
/******************************************************************************
* subdev base class implementation
*****************************************************************************/
+
+int
+_nouveau_clock_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_clock *clk = (void *)object;
+ nvkm_notify_put(&clk->pwrsrc_ntfy);
+ return nouveau_subdev_fini(&clk->base, suspend);
+}
+
int
_nouveau_clock_init(struct nouveau_object *object)
{
@@ -414,6 +487,10 @@ _nouveau_clock_init(struct nouveau_object *object)
struct nouveau_clocks *clock = clk->domains;
int ret;
+ ret = nouveau_subdev_init(&clk->base);
+ if (ret)
+ return ret;
+
memset(&clk->bstate, 0x00, sizeof(clk->bstate));
INIT_LIST_HEAD(&clk->bstate.list);
clk->bstate.pstate = 0xff;
@@ -434,7 +511,7 @@ _nouveau_clock_init(struct nouveau_object *object)
clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
- nouveau_pstate_calc(clk);
+ nouveau_pstate_calc(clk, true);
return 0;
}
@@ -444,6 +521,8 @@ _nouveau_clock_dtor(struct nouveau_object *object)
struct nouveau_clock *clk = (void *)object;
struct nouveau_pstate *pstate, *temp;
+ nvkm_notify_fini(&clk->pwrsrc_ntfy);
+
list_for_each_entry_safe(pstate, temp, &clk->states, head) {
nouveau_pstate_del(pstate);
}
@@ -456,6 +535,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
struct nouveau_clocks *clocks,
+ struct nouveau_pstate *pstates, int nb_pstates,
bool allow_reclock,
int length, void **object)
{
@@ -472,29 +552,46 @@ nouveau_clock_create_(struct nouveau_object *parent,
INIT_LIST_HEAD(&clk->states);
clk->domains = clocks;
- clk->ustate = -1;
+ clk->ustate_ac = -1;
+ clk->ustate_dc = -1;
+
+ INIT_WORK(&clk->work, nouveau_pstate_work);
+ init_waitqueue_head(&clk->wait);
+ atomic_set(&clk->waiting, 0);
- idx = 0;
- do {
- ret = nouveau_pstate_new(clk, idx++);
- } while (ret == 0);
+ /* If no pstates are provided, try to fetch them from the BIOS */
+ if (!pstates) {
+ idx = 0;
+ do {
+ ret = nouveau_pstate_new(clk, idx++);
+ } while (ret == 0);
+ } else {
+ for (idx = 0; idx < nb_pstates; idx++)
+ list_add_tail(&pstates[idx].head, &clk->states);
+ clk->state_nr = nb_pstates;
+ }
clk->allow_reclock = allow_reclock;
+ ret = nvkm_notify_init(&device->event, nouveau_clock_pwrsrc, true,
+ NULL, 0, 0, &clk->pwrsrc_ntfy);
+ if (ret)
+ return ret;
+
mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
if (mode) {
- if (!strncasecmpz(mode, "disabled", arglen)) {
- clk->ustate = -1;
- } else {
- char save = mode[arglen];
- long v;
-
- ((char *)mode)[arglen] = '\0';
- if (!kstrtol(mode, 0, &v))
- nouveau_clock_ustate_update(clk, v);
- ((char *)mode)[arglen] = save;
- }
+ clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
+ clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
}
+ mode = nouveau_stropt(device->cfgopt, "NvClkModeAC", &arglen);
+ if (mode)
+ clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
+
+ mode = nouveau_stropt(device->cfgopt, "NvClkModeDC", &arglen);
+ if (mode)
+ clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
+
return 0;
}
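
Reclocking now funnels through one worker with an atomic handshake, so bursts of requests coalesce and the power-source notifier can kick it without sleeping. The pattern in isolation, as a hedged standalone sketch (my_* hypothetical; the INIT_WORK/init_waitqueue_head setup mirrors the constructor above):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct my_reclock {
	struct work_struct work;
	wait_queue_head_t wait;
	atomic_t pending;
};

static void
my_reclock_work(struct work_struct *work)
{
	struct my_reclock *rc = container_of(work, typeof(*rc), work);

	/* atomic_xchg() collapses back-to-back requests into one pass */
	if (!atomic_xchg(&rc->pending, 0))
		return;
	/* ... reprogram the clocks here ... */
	wake_up_all(&rc->wait);
}

static void
my_reclock_request(struct my_reclock *rc, bool wait)
{
	atomic_set(&rc->pending, 1);
	schedule_work(&rc->work);
	if (wait)
		wait_event(rc->wait, !atomic_read(&rc->pending));
}
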
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
new file mode 100644
index 000000000000..425a8d5e9129
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
+ *
+ */
+
+#define MHZ (1000 * 1000)
+
+#define MASK(w) ((1 << (w)) - 1)
+
+#define SYS_GPCPLL_CFG_BASE 0x00137000
+#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
+
+#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
+#define GPCPLL_CFG_ENABLE BIT(0)
+#define GPCPLL_CFG_IDDQ BIT(1)
+#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
+#define GPCPLL_CFG_LOCK BIT(17)
+
+#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
+#define GPCPLL_COEFF_M_SHIFT 0
+#define GPCPLL_COEFF_M_WIDTH 8
+#define GPCPLL_COEFF_N_SHIFT 8
+#define GPCPLL_COEFF_N_WIDTH 8
+#define GPCPLL_COEFF_P_SHIFT 16
+#define GPCPLL_COEFF_P_WIDTH 6
+
+#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
+#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
+
+#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
+#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
+#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
+#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
+
+#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
+#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
+
+#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
+#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
+#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
+#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
+#define GPC2CLK_OUT_VCODIV_WIDTH 6
+#define GPC2CLK_OUT_VCODIV_SHIFT 8
+#define GPC2CLK_OUT_VCODIV1 0
+#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
+ GPC2CLK_OUT_VCODIV_SHIFT)
+#define GPC2CLK_OUT_BYPDIV_WIDTH 6
+#define GPC2CLK_OUT_BYPDIV_SHIFT 0
+#define GPC2CLK_OUT_BYPDIV31 0x3c
+#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
+ | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
+ | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
+#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
+ | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
+ | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
+
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
+ (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
+
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
+#ifdef __KERNEL__
+#include <nouveau_platform.h>
+#endif
+
+static const u8 pl_to_div[] = {
+/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
+/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
+};
+
+/* All frequencies in Mhz */
+struct gk20a_clk_pllg_params {
+ u32 min_vco, max_vco;
+ u32 min_u, max_u;
+ u32 min_m, max_m;
+ u32 min_n, max_n;
+ u32 min_pl, max_pl;
+};
+
+static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
+ .min_vco = 1000, .max_vco = 1700,
+ .min_u = 12, .max_u = 38,
+ .min_m = 1, .max_m = 255,
+ .min_n = 8, .max_n = 255,
+ .min_pl = 1, .max_pl = 32,
+};
+
+struct gk20a_clock_priv {
+ struct nouveau_clock base;
+ const struct gk20a_clk_pllg_params *params;
+ u32 m, n, pl;
+ u32 parent_rate;
+};
+#define to_gk20a_clock(base) container_of(base, struct gk20a_clock_priv, base)
+
+static void
+gk20a_pllg_read_mnp(struct gk20a_clock_priv *priv)
+{
+ u32 val;
+
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+ priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+}
+
+static u32
+gk20a_pllg_calc_rate(struct gk20a_clock_priv *priv)
+{
+ u64 rate; /* parent_rate * n can exceed 32 bits, and do_div() expects a u64 */
+ u32 divider;
+
+ rate = (u64)priv->parent_rate * priv->n;
+ divider = priv->m * pl_to_div[priv->pl];
+ do_div(rate, divider);
+
+ return rate / 2;
+}
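+
+/* Worked example (hypothetical values) of the formula above: a 12MHz
+ * parent clock with m = 1, n = 200, pl = 3 (pl_to_div[3] == 4) gives
+ * 12MHz * 200 / (1 * 4) = 600MHz at the PLL; gpc2clk runs at twice the
+ * GPC clock, so the final /2 yields a 300MHz GPC clock.
+ */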
+
+static int
+gk20a_pllg_calc_mnp(struct gk20a_clock_priv *priv, unsigned long rate)
+{
+ u32 target_clk_f, ref_clk_f, target_freq;
+ u32 min_vco_f, max_vco_f;
+ u32 low_pl, high_pl, best_pl;
+ u32 target_vco_f, vco_f;
+ u32 best_m, best_n;
+ u32 u_f;
+ u32 m, n, n2;
+ u32 delta, lwv, best_delta = ~0;
+ u32 pl;
+
+ target_clk_f = rate * 2 / MHZ;
+ ref_clk_f = priv->parent_rate / MHZ;
+
+ max_vco_f = priv->params->max_vco;
+ min_vco_f = priv->params->min_vco;
+ best_m = priv->params->max_m;
+ best_n = priv->params->min_n;
+ best_pl = priv->params->min_pl;
+
+ target_vco_f = target_clk_f + target_clk_f / 50;
+ if (max_vco_f < target_vco_f)
+ max_vco_f = target_vco_f;
+
+ /* min_pl <= high_pl <= max_pl */
+ high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
+ high_pl = min(high_pl, priv->params->max_pl);
+ high_pl = max(high_pl, priv->params->min_pl);
+
+ /* min_pl <= low_pl <= max_pl */
+ low_pl = min_vco_f / target_vco_f;
+ low_pl = min(low_pl, priv->params->max_pl);
+ low_pl = max(low_pl, priv->params->min_pl);
+
+ /* Find Indices of high_pl and low_pl */
+ for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
+ if (pl_to_div[pl] >= low_pl) {
+ low_pl = pl;
+ break;
+ }
+ }
+ for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
+ if (pl_to_div[pl] >= high_pl) {
+ high_pl = pl;
+ break;
+ }
+ }
+
+ nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
+ pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+
+ /* Select lowest possible VCO */
+ for (pl = low_pl; pl <= high_pl; pl++) {
+ target_vco_f = target_clk_f * pl_to_div[pl];
+ for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
+ u_f = ref_clk_f / m;
+
+ if (u_f < priv->params->min_u)
+ break;
+ if (u_f > priv->params->max_u)
+ continue;
+
+ n = (target_vco_f * m) / ref_clk_f;
+ n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
+
+ if (n > priv->params->max_n)
+ break;
+
+ for (; n <= n2; n++) {
+ if (n < priv->params->min_n)
+ continue;
+ if (n > priv->params->max_n)
+ break;
+
+ vco_f = ref_clk_f * n / m;
+
+ if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
+ lwv = (vco_f + (pl_to_div[pl] / 2))
+ / pl_to_div[pl];
+ delta = abs(lwv - target_clk_f);
+
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_m = m;
+ best_n = n;
+ best_pl = pl;
+
+ if (best_delta == 0)
+ goto found_match;
+ }
+ }
+ }
+ }
+ }
+
+found_match:
+ WARN_ON(best_delta == ~0);
+
+ if (best_delta != 0)
+ nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
+ target_clk_f);
+
+ priv->m = best_m;
+ priv->n = best_n;
+ priv->pl = best_pl;
+
+ target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
+
+ nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
+ target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
+
+ return 0;
+}
+
+static int
+gk20a_pllg_slide(struct gk20a_clock_priv *priv, u32 n)
+{
+ u32 val;
+ int ramp_timeout;
+
+ /* get old coefficients */
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ /* do nothing if NDIV is the same */
+ if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
+ return 0;
+
+ /* setup */
+ nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+ 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
+ nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+ 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
+
+ /* pll slowdown mode */
+ nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
+
+ /* new ndiv ready for ramp */
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
+ val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ udelay(1);
+ nv_wr32(priv, GPCPLL_COEFF, val);
+
+ /* dynamic ramp to new ndiv */
+ val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+ val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
+ udelay(1);
+ nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
+
+ for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
+ udelay(1);
+ val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+ if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
+ break;
+ }
+
+ /* exit slowdown mode */
+ nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
+ nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+
+ if (ramp_timeout <= 0) {
+ nv_error(priv, "gpcpll dynamic ramp timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void
+_gk20a_pllg_enable(struct gk20a_clock_priv *priv)
+{
+ nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+ nv_rd32(priv, GPCPLL_CFG);
+}
+
+static void
+_gk20a_pllg_disable(struct gk20a_clock_priv *priv)
+{
+ nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+ nv_rd32(priv, GPCPLL_CFG);
+}
+
+static int
+_gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide)
+{
+ u32 val, cfg;
+ u32 m_old, pl_old, n_lo;
+
+ /* get old coefficients */
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+
+ /* do NDIV slide if there is no change in M and PL */
+ cfg = nv_rd32(priv, GPCPLL_CFG);
+ if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
+ (cfg & GPCPLL_CFG_ENABLE)) {
+ return gk20a_pllg_slide(priv, priv->n);
+ }
+
+ /* slide down to NDIV_LO */
+ n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
+ priv->parent_rate / MHZ);
+ if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
+ int ret = gk20a_pllg_slide(priv, n_lo);
+
+ if (ret)
+ return ret;
+ }
+
+ /* split the FO-to-bypass jump in half by setting the out divider to 1:2 */
+ nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
+
+ /* put PLL in bypass before programming it */
+ val = nv_rd32(priv, SEL_VCO);
+ val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ udelay(2);
+ nv_wr32(priv, SEL_VCO, val);
+
+ /* get out from IDDQ */
+ val = nv_rd32(priv, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_IDDQ) {
+ val &= ~GPCPLL_CFG_IDDQ;
+ nv_wr32(priv, GPCPLL_CFG, val);
+ nv_rd32(priv, GPCPLL_CFG);
+ udelay(2);
+ }
+
+ _gk20a_pllg_disable(priv);
+
+ nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
+ priv->pl);
+
+ n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
+ priv->parent_rate / MHZ);
+ val = priv->m << GPCPLL_COEFF_M_SHIFT;
+ val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
+ val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
+ nv_wr32(priv, GPCPLL_COEFF, val);
+
+ _gk20a_pllg_enable(priv);
+
+ val = nv_rd32(priv, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_LOCK_DET_OFF) {
+ val &= ~GPCPLL_CFG_LOCK_DET_OFF;
+ nv_wr32(priv, GPCPLL_CFG, val);
+ }
+
+ if (!nouveau_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+ GPCPLL_CFG_LOCK)) {
+ nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* switch to VCO mode */
+ nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ /* restore out divider 1:1 */
+ val = nv_rd32(priv, GPC2CLK_OUT);
+ val &= ~GPC2CLK_OUT_VCODIV_MASK;
+ udelay(2);
+ nv_wr32(priv, GPC2CLK_OUT, val);
+
+ /* slide up to new NDIV */
+ return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
+}
+
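+/* program with dynamic NDIV slides allowed first; if that fails, retry a full
+ * reprogram under bypass without sliding */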
+static int
+gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv)
+{
+ int err;
+
+ err = _gk20a_pllg_program_mnp(priv, true);
+ if (err)
+ err = _gk20a_pllg_program_mnp(priv, false);
+
+ return err;
+}
+
+static void
+gk20a_pllg_disable(struct gk20a_clock_priv *priv)
+{
+ u32 val;
+
+ /* slide to VCO min */
+ val = nv_rd32(priv, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_ENABLE) {
+ u32 coeff, m, n_lo;
+
+ coeff = nv_rd32(priv, GPCPLL_COEFF);
+ m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
+ priv->parent_rate / MHZ);
+ gk20a_pllg_slide(priv, n_lo);
+ }
+
+ /* put PLL in bypass before disabling it */
+ nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
+ _gk20a_pllg_disable(priv);
+}
+
+#define GK20A_CLK_GPC_MDIV 1000
+
+static struct nouveau_clocks
+gk20a_domains[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max }
+};
+
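+/* pstate frequencies below appear to be in kHz of the GPC clock domain */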
+static struct nouveau_pstate
+gk20a_pstates[] = {
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 72000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 108000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 180000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 252000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 324000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 396000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 468000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 540000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 612000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 648000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 684000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 708000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 756000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 804000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 852000,
+ },
+ },
+};
+
+static int
+gk20a_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct gk20a_clock_priv *priv = (void *)clk;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(clk)->crystal;
+ case nv_clk_src_gpc:
+ gk20a_pllg_read_mnp(priv);
+ return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
+ default:
+ nv_error(clk, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
+static int
+gk20a_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct gk20a_clock_priv *priv = (void *)clk;
+
+ return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
+ GK20A_CLK_GPC_MDIV);
+}
+
+static int
+gk20a_clock_prog(struct nouveau_clock *clk)
+{
+ struct gk20a_clock_priv *priv = (void *)clk;
+
+ return gk20a_pllg_program_mnp(priv);
+}
+
+static void
+gk20a_clock_tidy(struct nouveau_clock *clk)
+{
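+	/* no-op: the GK20A clock has no deferred state to clean up */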
+}
+
+static int
+gk20a_clock_fini(struct nouveau_object *object, bool suspend)
+{
+ struct gk20a_clock_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_clock_fini(&priv->base, false);
+
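+	/* the PLL is disabled unconditionally, even if the base fini failed */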
+ gk20a_pllg_disable(priv);
+
+ return ret;
+}
+
+static int
+gk20a_clock_init(struct nouveau_object *object)
+{
+ struct gk20a_clock_priv *priv = (void *)object;
+ int ret;
+
+ nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+
+ ret = nouveau_clock_init(&priv->base);
+ if (ret)
+ return ret;
+
+ ret = gk20a_clock_prog(&priv->base);
+ if (ret) {
+ nv_error(priv, "cannot initialize clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gk20a_clock_priv *priv;
+ struct nouveau_platform_device *plat;
+ int ret;
+ int i;
+
+ /* Finish initializing the pstates */
+ for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
+ INIT_LIST_HEAD(&gk20a_pstates[i].list);
+ gk20a_pstates[i].pstate = i + 1;
+ }
+
+ ret = nouveau_clock_create(parent, engine, oclass, gk20a_domains,
+ gk20a_pstates, ARRAY_SIZE(gk20a_pstates), true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->params = &gk20a_pllg_params;
+
+ plat = nv_device_to_platform(nv_device(parent));
+ priv->parent_rate = clk_get_rate(plat->gpu->clk);
+ nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
+
+ priv->base.read = gk20a_clock_read;
+ priv->base.calc = gk20a_clock_calc;
+ priv->base.prog = gk20a_clock_prog;
+ priv->base.tidy = gk20a_clock_tidy;
+
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_clock_oclass = {
+ .handle = NV_SUBDEV(CLOCK, 0xea),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_clock_ctor,
+ .dtor = _nouveau_subdev_dtor,
+ .init = gk20a_clock_init,
+ .fini = gk20a_clock_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index eb2d4425a49e..4c48232686be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -82,8 +82,8 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, false,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, NULL, 0,
+ false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 8a9e16839791..08368fe97029 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -213,8 +213,8 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv40_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, true,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, NULL, 0,
+ true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index 8c132772ba9e..5070ebc260f8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -507,7 +507,7 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
- false, &priv);
+ NULL, 0, false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 9fb58354a80b..087012b18956 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -302,8 +302,8 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, false,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
+ false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
index 6a65fc9e9663..74e19731b1b7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -421,8 +421,8 @@ nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvaa_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, true,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, NULL,
+ 0, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index dbf8517f54da..1234abaab2db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -437,8 +437,8 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, false,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, NULL, 0,
+ false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 0e62a3240144..7eccad57512e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -475,8 +475,8 @@ nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nve0_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, true,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, NULL, 0,
+ true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 1fc55c1e91a1..4150b0d10af8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -250,9 +250,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c08_page) {
- priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
- if (!priv->r100c08)
- nv_warn(priv, "failed 0x100c08 page map\n");
+ priv->r100c08 = dma_map_page(nv_device_base(device),
+ priv->r100c08_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(nv_device_base(device), priv->r100c08))
+ return -EFAULT;
} else {
nv_warn(priv, "failed 0x100c08 page alloc\n");
}
@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
struct nv50_fb_priv *priv = (void *)object;
if (priv->r100c08_page) {
- nv_device_unmap_page(device, priv->r100c08);
+ dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(priv->r100c08_page);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 0670ae33ee45..b19a2b3c1081 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
struct nvc0_fb_priv *priv = (void *)object;
if (priv->r100c10_page) {
- nv_device_unmap_page(device, priv->r100c10);
+ dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(priv->r100c10_page);
}
@@ -93,8 +94,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c10_page) {
- priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
- if (!priv->r100c10)
+ priv->r100c10 = dma_map_page(nv_device_base(device),
+ priv->r100c10_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(nv_device_base(device), priv->r100c10))
return -EFAULT;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 5a6a5027f749..2b284b192763 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -26,7 +26,7 @@
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/clock.h>
#include <subdev/clock/pll.h>
@@ -425,7 +425,7 @@ extern const u8 nvc0_pte_storage_type_map[256];
void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
- struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_ltc *ltc = nouveau_ltc(pfb);
struct nouveau_mem *mem = *pmem;
*pmem = NULL;
@@ -434,7 +434,7 @@ nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
mutex_lock(&pfb->base.mutex);
if (mem->tag)
- ltcg->tags_free(ltcg, &mem->tag);
+ ltc->tags_free(ltc, &mem->tag);
__nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
@@ -468,12 +468,12 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mutex_lock(&pfb->base.mutex);
if (comp) {
- struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_ltc *ltc = nouveau_ltc(pfb);
/* compression only works with lpages */
if (align == (1 << (17 - 12))) {
int n = size >> 5;
- ltcg->tags_alloc(ltcg, n, &mem->tag);
+ ltc->tags_alloc(ltc, n, &mem->tag);
}
if (unlikely(!mem->tag))
@@ -554,13 +554,13 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
} else {
/* otherwise, address lowest common amount from 0GiB */
ret = nouveau_mm_init(&pfb->vram, rsvd_head,
- (bsize << 8) * parts, 1);
+ (bsize << 8) * parts - rsvd_head, 1);
if (ret)
return ret;
/* and the rest starting from (8GiB + common_size) */
offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (ram->size >> 12) - (bsize << 8) - rsvd_tail;
+ length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index 45e0202f3151..b1e3ed7c8beb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -106,39 +106,59 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
}
static void
-nouveau_gpio_intr_disable(struct nouveau_event *event, int type, int index)
+nouveau_gpio_intr_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+ struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
impl->intr_mask(gpio, type, 1 << index, 0);
}
static void
-nouveau_gpio_intr_enable(struct nouveau_event *event, int type, int index)
+nouveau_gpio_intr_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+ struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
impl->intr_mask(gpio, type, 1 << index, 1 << index);
}
+static int
+nouveau_gpio_intr_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ struct nvkm_gpio_ntfy_req *req = data;
+ if (!WARN_ON(size != sizeof(*req))) {
+ notify->size = sizeof(struct nvkm_gpio_ntfy_rep);
+ notify->types = req->mask;
+ notify->index = req->line;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_gpio_intr(struct nouveau_subdev *subdev)
{
struct nouveau_gpio *gpio = nouveau_gpio(subdev);
const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
- u32 hi, lo, e, i;
+ u32 hi, lo, i;
impl->intr_stat(gpio, &hi, &lo);
- for (i = 0; e = 0, (hi | lo) && i < impl->lines; i++) {
- if (hi & (1 << i))
- e |= NVKM_GPIO_HI;
- if (lo & (1 << i))
- e |= NVKM_GPIO_LO;
- nouveau_event_trigger(gpio->events, e, i);
+ for (i = 0; (hi | lo) && i < impl->lines; i++) {
+ struct nvkm_gpio_ntfy_rep rep = {
+ .mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
+ (NVKM_GPIO_LO * !!(lo & (1 << i))),
+ };
+ nvkm_event_send(&gpio->event, rep.mask, i, &rep, sizeof(rep));
}
}
+static const struct nvkm_event_func
+nouveau_gpio_intr_func = {
+ .ctor = nouveau_gpio_intr_ctor,
+ .init = nouveau_gpio_intr_init,
+ .fini = nouveau_gpio_intr_fini,
+};
+
int
_nouveau_gpio_fini(struct nouveau_object *object, bool suspend)
{
@@ -183,7 +203,7 @@ void
_nouveau_gpio_dtor(struct nouveau_object *object)
{
struct nouveau_gpio *gpio = (void *)object;
- nouveau_event_destroy(&gpio->events);
+ nvkm_event_fini(&gpio->event);
nouveau_subdev_destroy(&gpio->base);
}
@@ -208,13 +228,11 @@ nouveau_gpio_create_(struct nouveau_object *parent,
gpio->get = nouveau_gpio_get;
gpio->reset = impl->reset;
- ret = nouveau_event_create(2, impl->lines, &gpio->events);
+ ret = nvkm_event_init(&nouveau_gpio_intr_func, 2, impl->lines,
+ &gpio->event);
if (ret)
return ret;
- gpio->events->priv = gpio;
- gpio->events->enable = nouveau_gpio_intr_enable;
- gpio->events->disable = nouveau_gpio_intr_disable;
nv_subdev(gpio)->intr = nouveau_gpio_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 09ba2cc851cf..a652cafde3d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -326,9 +326,9 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
static void
-nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index)
+nouveau_i2c_intr_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
+ struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
struct nouveau_i2c_port *port = i2c->find(i2c, index);
const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
if (port && port->aux >= 0)
@@ -336,15 +336,28 @@ nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index)
}
static void
-nouveau_i2c_intr_enable(struct nouveau_event *event, int type, int index)
+nouveau_i2c_intr_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
+ struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
struct nouveau_i2c_port *port = i2c->find(i2c, index);
const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
if (port && port->aux >= 0)
impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
}
+static int
+nouveau_i2c_intr_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ struct nvkm_i2c_ntfy_req *req = data;
+ if (!WARN_ON(size != sizeof(*req))) {
+ notify->size = sizeof(struct nvkm_i2c_ntfy_rep);
+ notify->types = req->mask;
+ notify->index = req->port;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_i2c_intr(struct nouveau_subdev *subdev)
{
@@ -364,13 +377,26 @@ nouveau_i2c_intr(struct nouveau_subdev *subdev)
if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
-
- nouveau_event_trigger(i2c->ntfy, e, port->index);
+ if (e) {
+ struct nvkm_i2c_ntfy_rep rep = {
+ .mask = e,
+ };
+ nvkm_event_send(&i2c->event, rep.mask,
+ port->index, &rep,
+ sizeof(rep));
+ }
}
}
}
}
+static const struct nvkm_event_func
+nouveau_i2c_intr_func = {
+ .ctor = nouveau_i2c_intr_ctor,
+ .init = nouveau_i2c_intr_init,
+ .fini = nouveau_i2c_intr_fini,
+};
+
int
_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
{
@@ -431,7 +457,7 @@ _nouveau_i2c_dtor(struct nouveau_object *object)
struct nouveau_i2c *i2c = (void *)object;
struct nouveau_i2c_port *port, *temp;
- nouveau_event_destroy(&i2c->ntfy);
+ nvkm_event_fini(&i2c->event);
list_for_each_entry_safe(port, temp, &i2c->ports, head) {
nouveau_object_ref(NULL, (struct nouveau_object **)&port);
@@ -547,13 +573,10 @@ nouveau_i2c_create_(struct nouveau_object *parent,
}
}
- ret = nouveau_event_create(4, index, &i2c->ntfy);
+ ret = nvkm_event_init(&nouveau_i2c_intr_func, 4, index, &i2c->event);
if (ret)
return ret;
- i2c->ntfy->priv = i2c;
- i2c->ntfy->enable = nouveau_i2c_intr_enable;
- i2c->ntfy->disable = nouveau_i2c_intr_disable;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
new file mode 100644
index 000000000000..32ed442c5913
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "priv.h"
+
+static int
+nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n,
+ struct nouveau_mm_node **pnode)
+{
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ int ret;
+
+ ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+ if (ret)
+ *pnode = NULL;
+
+ return ret;
+}
+
+static void
+nvkm_ltc_tags_free(struct nouveau_ltc *ltc, struct nouveau_mm_node **pnode)
+{
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ nouveau_mm_free(&priv->tags, pnode);
+}
+
+static void
+nvkm_ltc_tags_clear(struct nouveau_ltc *ltc, u32 first, u32 count)
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ const u32 limit = first + count - 1;
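+	/* "limit" is inclusive; both bounds must lie inside the tag space */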
+
+ BUG_ON((first > limit) || (limit >= priv->num_tags));
+
+ impl->cbc_clear(priv, first, limit);
+ impl->cbc_wait(priv);
+}
+
+static int
+nvkm_ltc_zbc_color_get(struct nouveau_ltc *ltc, int index, const u32 color[4])
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
+ struct nvkm_ltc_priv *priv = (void *)ltc;
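+	/* cache the value so _nvkm_ltc_init() can restore it after resume */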
+ memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
+ impl->zbc_clear_color(priv, index, color);
+ return index;
+}
+
+static int
+nvkm_ltc_zbc_depth_get(struct nouveau_ltc *ltc, int index, const u32 depth)
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ priv->zbc_depth[index] = depth;
+ impl->zbc_clear_depth(priv, index, depth);
+ return index;
+}
+
+int
+_nvkm_ltc_init(struct nouveau_object *object)
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
+ struct nvkm_ltc_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_subdev_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) {
+ impl->zbc_clear_color(priv, i, priv->zbc_color[i]);
+ impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]);
+ }
+
+ return 0;
+}
+
+int
+nvkm_ltc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int length, void **pobject)
+{
+ const struct nvkm_ltc_impl *impl = (void *)oclass;
+ struct nvkm_ltc_priv *priv;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PLTCG",
+ "l2c", length, pobject);
+ priv = *pobject;
+ if (ret)
+ return ret;
+
+ memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color));
+ memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
+
+ priv->base.base.intr = impl->intr;
+ priv->base.tags_alloc = nvkm_ltc_tags_alloc;
+ priv->base.tags_free = nvkm_ltc_tags_free;
+ priv->base.tags_clear = nvkm_ltc_tags_clear;
+ priv->base.zbc_min = 1; /* reserve 0 for disabled */
+ priv->base.zbc_max = min(impl->zbc, NOUVEAU_LTC_MAX_ZBC_CNT) - 1;
+ priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
+ priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index f2f3338a967a..b54b582e72c4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -25,10 +25,45 @@
#include <subdev/fb.h>
#include <subdev/timer.h>
-#include "gf100.h"
+#include "priv.h"
+
+void
+gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+{
+ nv_wr32(priv, 0x17e8cc, start);
+ nv_wr32(priv, 0x17e8d0, limit);
+ nv_wr32(priv, 0x17e8c8, 0x00000004);
+}
+
+void
+gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+{
+ int c, s;
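+	/* poll each LTS until its clear operation has completed */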
+ for (c = 0; c < priv->ltc_nr; c++) {
+ for (s = 0; s < priv->lts_nr; s++)
+ nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
+ }
+}
+
+void
+gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+{
+ nv_mask(priv, 0x17ea44, 0x0000000f, i);
+ nv_wr32(priv, 0x17ea48, color[0]);
+ nv_wr32(priv, 0x17ea4c, color[1]);
+ nv_wr32(priv, 0x17ea50, color[2]);
+ nv_wr32(priv, 0x17ea54, color[3]);
+}
+
+void
+gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+{
+ nv_mask(priv, 0x17ea44, 0x0000000f, i);
+ nv_wr32(priv, 0x17ea58, depth);
+}
static void
-gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
+gf100_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
{
u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
u32 stat = nv_rd32(priv, base + 0x020);
@@ -39,17 +74,17 @@ gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
}
}
-static void
-gf100_ltcg_intr(struct nouveau_subdev *subdev)
+void
+gf100_ltc_intr(struct nouveau_subdev *subdev)
{
- struct gf100_ltcg_priv *priv = (void *)subdev;
+ struct nvkm_ltc_priv *priv = (void *)subdev;
u32 mask;
mask = nv_rd32(priv, 0x00017c);
while (mask) {
u32 lts, ltc = __ffs(mask);
for (lts = 0; lts < priv->lts_nr; lts++)
- gf100_ltcg_lts_isr(priv, ltc, lts);
+ gf100_ltc_lts_isr(priv, ltc, lts);
mask &= ~(1 << ltc);
}
@@ -59,52 +94,38 @@ gf100_ltcg_intr(struct nouveau_subdev *subdev)
nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
}
-int
-gf100_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
- struct nouveau_mm_node **pnode)
+static int
+gf100_ltc_init(struct nouveau_object *object)
{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
+ struct nvkm_ltc_priv *priv = (void *)object;
int ret;
- ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+ ret = nvkm_ltc_init(priv);
if (ret)
- *pnode = NULL;
+ return ret;
- return ret;
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN: clear bit 20 */
+ nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
+ nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ return 0;
}
void
-gf100_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
-{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
-
- nouveau_mm_free(&priv->tags, pnode);
-}
-
-static void
-gf100_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+gf100_ltc_dtor(struct nouveau_object *object)
{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- u32 last = first + count - 1;
- int p, i;
-
- BUG_ON((first > last) || (last >= priv->num_tags));
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_ltc_priv *priv = (void *)object;
- nv_wr32(priv, 0x17e8cc, first);
- nv_wr32(priv, 0x17e8d0, last);
- nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
+ nouveau_mm_fini(&priv->tags);
+ nouveau_mm_free(&pfb->vram, &priv->tag_ram);
- /* wait until it's finished with clearing */
- for (p = 0; p < priv->ltc_nr; ++p) {
- for (i = 0; i < priv->lts_nr; ++i)
- nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
- }
+ nvkm_ltc_destroy(priv);
}
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
*/
int
-gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv)
+gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
{
u32 tag_size, tag_margin, tag_align;
int ret;
@@ -135,29 +156,29 @@ gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv)
if (ret) {
priv->num_tags = 0;
} else {
- u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin;
+ u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin;
tag_base += tag_align - 1;
ret = do_div(tag_base, tag_align);
priv->tag_base = tag_base;
}
- ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
+ ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
return ret;
}
-static int
-gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+int
+gf100_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct gf100_ltcg_priv *priv;
struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
- ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
+ ret = nvkm_ltc_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -170,57 +191,27 @@ gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
- ret = gf100_ltcg_init_tag_ram(pfb, priv);
+ ret = gf100_ltc_init_tag_ram(pfb, priv);
if (ret)
return ret;
- priv->base.tags_alloc = gf100_ltcg_tags_alloc;
- priv->base.tags_free = gf100_ltcg_tags_free;
- priv->base.tags_clear = gf100_ltcg_tags_clear;
-
- nv_subdev(priv)->intr = gf100_ltcg_intr;
- return 0;
-}
-
-void
-gf100_ltcg_dtor(struct nouveau_object *object)
-{
- struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
-
- nouveau_mm_fini(&priv->tags);
- nouveau_mm_free(&pfb->vram, &priv->tag_ram);
-
- nouveau_ltcg_destroy(ltcg);
-}
-
-static int
-gf100_ltcg_init(struct nouveau_object *object)
-{
- struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- int ret;
-
- ret = nouveau_ltcg_init(ltcg);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
- nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
- if (nv_device(ltcg)->card_type >= NV_E0)
- nv_wr32(priv, 0x17e000, priv->ltc_nr);
- nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ nv_subdev(priv)->intr = gf100_ltc_intr;
return 0;
}
struct nouveau_oclass *
-gf100_ltcg_oclass = &(struct nouveau_oclass) {
- .handle = NV_SUBDEV(LTCG, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = gf100_ltcg_ctor,
- .dtor = gf100_ltcg_dtor,
- .init = gf100_ltcg_init,
- .fini = _nouveau_ltcg_fini,
+gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
+ .base.handle = NV_SUBDEV(LTC, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gf100_ltc_ctor,
+ .dtor = gf100_ltc_dtor,
+ .init = gf100_ltc_init,
+ .fini = _nvkm_ltc_fini,
},
-};
+ .intr = gf100_ltc_intr,
+ .cbc_clear = gf100_ltc_cbc_clear,
+ .cbc_wait = gf100_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gf100_ltc_zbc_clear_color,
+ .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
new file mode 100644
index 000000000000..ea716569745d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static int
+gk104_ltc_init(struct nouveau_object *object)
+{
+ struct nvkm_ltc_priv *priv = (void *)object;
+ int ret;
+
+ ret = nvkm_ltc_init(priv);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
+ nv_wr32(priv, 0x17e000, priv->ltc_nr);
+ nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ return 0;
+}
+
+struct nouveau_oclass *
+gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
+ .base.handle = NV_SUBDEV(LTC, 0xe4),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gf100_ltc_ctor,
+ .dtor = gf100_ltc_dtor,
+ .init = gk104_ltc_init,
+ .fini = _nvkm_ltc_fini,
+ },
+ .intr = gf100_ltc_intr,
+ .cbc_clear = gf100_ltc_cbc_clear,
+ .cbc_wait = gf100_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gf100_ltc_zbc_clear_color,
+ .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
index e79d0e81de40..4761b2e9af00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
@@ -25,10 +25,45 @@
#include <subdev/fb.h>
#include <subdev/timer.h>
-#include "gf100.h"
+#include "priv.h"
static void
-gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
+gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+{
+ nv_wr32(priv, 0x17e270, start);
+ nv_wr32(priv, 0x17e274, limit);
+ nv_wr32(priv, 0x17e26c, 0x00000004);
+}
+
+static void
+gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+{
+ int c, s;
+ for (c = 0; c < priv->ltc_nr; c++) {
+ for (s = 0; s < priv->lts_nr; s++)
+ nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0);
+ }
+}
+
+static void
+gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+{
+ nv_mask(priv, 0x17e338, 0x0000000f, i);
+ nv_wr32(priv, 0x17e33c, color[0]);
+ nv_wr32(priv, 0x17e340, color[1]);
+ nv_wr32(priv, 0x17e344, color[2]);
+ nv_wr32(priv, 0x17e348, color[3]);
+}
+
+static void
+gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+{
+ nv_mask(priv, 0x17e338, 0x0000000f, i);
+ nv_wr32(priv, 0x17e34c, depth);
+}
+
+static void
+gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
{
u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
u32 stat = nv_rd32(priv, base + 0x00c);
@@ -40,16 +75,16 @@ gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
}
static void
-gm107_ltcg_intr(struct nouveau_subdev *subdev)
+gm107_ltc_intr(struct nouveau_subdev *subdev)
{
- struct gf100_ltcg_priv *priv = (void *)subdev;
+ struct nvkm_ltc_priv *priv = (void *)subdev;
u32 mask;
mask = nv_rd32(priv, 0x00017c);
while (mask) {
u32 lts, ltc = __ffs(mask);
for (lts = 0; lts < priv->lts_nr; lts++)
- gm107_ltcg_lts_isr(priv, ltc, lts);
+ gm107_ltc_lts_isr(priv, ltc, lts);
mask &= ~(1 << ltc);
}
@@ -59,37 +94,32 @@ gm107_ltcg_intr(struct nouveau_subdev *subdev)
nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
}
-static void
-gm107_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+static int
+gm107_ltc_init(struct nouveau_object *object)
{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- u32 last = first + count - 1;
- int p, i;
-
- BUG_ON((first > last) || (last >= priv->num_tags));
+ struct nvkm_ltc_priv *priv = (void *)object;
+ int ret;
- nv_wr32(priv, 0x17e270, first);
- nv_wr32(priv, 0x17e274, last);
- nv_wr32(priv, 0x17e26c, 0x4); /* trigger clear */
+ ret = nvkm_ltc_init(priv);
+ if (ret)
+ return ret;
- /* wait until it's finished with clearing */
- for (p = 0; p < priv->ltc_nr; ++p) {
- for (i = 0; i < priv->lts_nr; ++i)
- nv_wait(priv, 0x14046c + p * 0x2000 + i * 0x200, ~0, 0);
- }
+ nv_wr32(priv, 0x17e27c, priv->ltc_nr);
+ nv_wr32(priv, 0x17e278, priv->tag_base);
+ return 0;
}
static int
-gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+gm107_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct gf100_ltcg_priv *priv;
struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
- ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
+ ret = nvkm_ltc_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -102,41 +132,26 @@ gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
- ret = gf100_ltcg_init_tag_ram(pfb, priv);
- if (ret)
- return ret;
-
- priv->base.tags_alloc = gf100_ltcg_tags_alloc;
- priv->base.tags_free = gf100_ltcg_tags_free;
- priv->base.tags_clear = gm107_ltcg_tags_clear;
-
- nv_subdev(priv)->intr = gm107_ltcg_intr;
- return 0;
-}
-
-static int
-gm107_ltcg_init(struct nouveau_object *object)
-{
- struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- int ret;
-
- ret = nouveau_ltcg_init(ltcg);
+ ret = gf100_ltc_init_tag_ram(pfb, priv);
if (ret)
return ret;
- nv_wr32(priv, 0x17e27c, priv->ltc_nr);
- nv_wr32(priv, 0x17e278, priv->tag_base);
return 0;
}
struct nouveau_oclass *
-gm107_ltcg_oclass = &(struct nouveau_oclass) {
- .handle = NV_SUBDEV(LTCG, 0xff),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = gm107_ltcg_ctor,
- .dtor = gf100_ltcg_dtor,
- .init = gm107_ltcg_init,
- .fini = _nouveau_ltcg_fini,
+gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
+ .base.handle = NV_SUBDEV(LTC, 0xff),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm107_ltc_ctor,
+ .dtor = gf100_ltc_dtor,
+ .init = gm107_ltc_init,
+ .fini = _nvkm_ltc_fini,
},
-};
+ .intr = gm107_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
new file mode 100644
index 000000000000..594924f39126
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
@@ -0,0 +1,69 @@
+#ifndef __NVKM_LTC_PRIV_H__
+#define __NVKM_LTC_PRIV_H__
+
+#include <subdev/ltc.h>
+#include <subdev/fb.h>
+
+struct nvkm_ltc_priv {
+ struct nouveau_ltc base;
+ u32 ltc_nr;
+ u32 lts_nr;
+
+ u32 num_tags;
+ u32 tag_base;
+ struct nouveau_mm tags;
+ struct nouveau_mm_node *tag_ram;
+
+ u32 zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT][4];
+ u32 zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
+};
+
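+/* lifecycle helpers following the usual nouveau subdev pattern */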
+#define nvkm_ltc_create(p,e,o,d) \
+ nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_ltc_destroy(p) ({ \
+ struct nvkm_ltc_priv *_priv = (p); \
+ _nvkm_ltc_dtor(nv_object(_priv)); \
+})
+#define nvkm_ltc_init(p) ({ \
+ struct nvkm_ltc_priv *_priv = (p); \
+ _nvkm_ltc_init(nv_object(_priv)); \
+})
+#define nvkm_ltc_fini(p,s) ({ \
+ struct nvkm_ltc_priv *_priv = (p); \
+ _nvkm_ltc_fini(nv_object(_priv), (s)); \
+})
+
+int nvkm_ltc_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+
+#define _nvkm_ltc_dtor _nouveau_subdev_dtor
+int _nvkm_ltc_init(struct nouveau_object *);
+#define _nvkm_ltc_fini _nouveau_subdev_fini
+
+int gf100_ltc_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void gf100_ltc_dtor(struct nouveau_object *);
+int gf100_ltc_init_tag_ram(struct nouveau_fb *, struct nvkm_ltc_priv *);
+int gf100_ltc_tags_alloc(struct nouveau_ltc *, u32, struct nouveau_mm_node **);
+void gf100_ltc_tags_free(struct nouveau_ltc *, struct nouveau_mm_node **);
+
+struct nvkm_ltc_impl {
+ struct nouveau_oclass base;
+ void (*intr)(struct nouveau_subdev *);
+
+ void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
+ void (*cbc_wait)(struct nvkm_ltc_priv *);
+
+ int zbc;
+ void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]);
+ void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
+};
+
+void gf100_ltc_intr(struct nouveau_subdev *);
+void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
+void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
+void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
+void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
deleted file mode 100644
index 87b10b8412ea..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __NVKM_LTCG_PRIV_GF100_H__
-#define __NVKM_LTCG_PRIV_GF100_H__
-
-#include <subdev/ltcg.h>
-
-struct gf100_ltcg_priv {
- struct nouveau_ltcg base;
- u32 ltc_nr;
- u32 lts_nr;
- u32 num_tags;
- u32 tag_base;
- struct nouveau_mm tags;
- struct nouveau_mm_node *tag_ram;
-};
-
-void gf100_ltcg_dtor(struct nouveau_object *);
-int gf100_ltcg_init_tag_ram(struct nouveau_fb *, struct gf100_ltcg_priv *);
-int gf100_ltcg_tags_alloc(struct nouveau_ltcg *, u32, struct nouveau_mm_node **);
-void gf100_ltcg_tags_free(struct nouveau_ltcg *, struct nouveau_mm_node **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 8a5555192fa5..ca7cee3a314a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -22,9 +22,17 @@
* Authors: Ben Skeggs
*/
-#include <subdev/mc.h>
+#include "priv.h"
#include <core/option.h>
+static inline void
+nouveau_mc_unk260(struct nouveau_mc *pmc, u32 data)
+{
+ const struct nouveau_mc_oclass *impl = (void *)nv_oclass(pmc);
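+	/* optional hook; a no-op on chipsets that do not implement it */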
+ if (impl->unk260)
+ impl->unk260(pmc, data);
+}
+
static inline u32
nouveau_mc_intr_mask(struct nouveau_mc *pmc)
{
@@ -114,6 +122,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ pmc->unk260 = nouveau_mc_unk260;
+
if (nv_device_is_pci(device))
switch (device->pdev->device & 0x0ff0) {
case 0x00f0:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c
new file mode 100644
index 000000000000..b8d6cb435d0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+gk20a_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0xea),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv50_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nvc0_mc_intr,
+ .msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index 81a408e7d034..4d9ea46c47c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -1,7 +1,7 @@
#ifndef __NVKM_MC_NV04_H__
#define __NVKM_MC_NV04_H__
-#include <subdev/mc.h>
+#include "priv.h"
struct nv04_mc_priv {
struct nouveau_mc base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index f9c6a678b47d..15d41dc176ff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -41,7 +41,7 @@ nvc0_mc_intr[] = {
{ 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
{ 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
{ 0x01000000, NVDEV_SUBDEV_PWR },
- { 0x02000000, NVDEV_SUBDEV_LTCG },
+ { 0x02000000, NVDEV_SUBDEV_LTC },
{ 0x08000000, NVDEV_SUBDEV_FB },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
@@ -56,6 +56,12 @@ nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
nv_wr32(priv, 0x088704, 0x00000000);
}
+void
+nvc0_mc_unk260(struct nouveau_mc *pmc, u32 data)
+{
+ nv_wr32(pmc, 0x000260, data);
+}
+
struct nouveau_oclass *
nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0xc0),
@@ -67,4 +73,5 @@ nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
},
.intr = nvc0_mc_intr,
.msi_rearm = nvc0_mc_msi_rearm,
+ .unk260 = nvc0_mc_unk260,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
index 837e545aeb9f..68b5f61aadb5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -35,4 +35,5 @@ nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
},
.intr = nvc0_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
+ .unk260 = nvc0_mc_unk260,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
new file mode 100644
index 000000000000..911e66392587
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
@@ -0,0 +1,38 @@
+#ifndef __NVKM_MC_PRIV_H__
+#define __NVKM_MC_PRIV_H__
+
+#include <subdev/mc.h>
+
+#define nouveau_mc_create(p,e,o,d) \
+ nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_destroy(p) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
+})
+#define nouveau_mc_init(p) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
+})
+#define nouveau_mc_fini(p,s) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
+})
+
+int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_mc_dtor(struct nouveau_object *);
+int _nouveau_mc_init(struct nouveau_object *);
+int _nouveau_mc_fini(struct nouveau_object *, bool);
+
+struct nouveau_mc_intr {
+ u32 stat;
+ u32 unit;
+};
+
+struct nouveau_mc_oclass {
+ struct nouveau_oclass base;
+ const struct nouveau_mc_intr *intr;
+ void (*msi_rearm)(struct nouveau_mc *);
+ void (*unk260)(struct nouveau_mc *, u32);
+};
+
+void nvc0_mc_unk260(struct nouveau_mc *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
index d4fd3bc9c66f..69f1f34f6931 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -22,9 +22,18 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
#include <subdev/timer.h>
+#include "priv.h"
+
+static void
+nouveau_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
+{
+ const struct nvkm_pwr_impl *impl = (void *)nv_oclass(ppwr);
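+	/* PGOB handling is optional; only implementations that define it are called */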
+ if (impl->pgob)
+ impl->pgob(ppwr, enable);
+}
+
static int
nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
u32 process, u32 message, u32 data0, u32 data1)
@@ -177,6 +186,7 @@ _nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
int
_nouveau_pwr_init(struct nouveau_object *object)
{
+ const struct nvkm_pwr_impl *impl = (void *)object->oclass;
struct nouveau_pwr *ppwr = (void *)object;
int ret, i;
@@ -186,6 +196,7 @@ _nouveau_pwr_init(struct nouveau_object *object)
nv_subdev(ppwr)->intr = nouveau_pwr_intr;
ppwr->message = nouveau_pwr_send;
+ ppwr->pgob = nouveau_pwr_pgob;
/* prevent previous ucode from running, wait for idle, reset */
nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
@@ -195,15 +206,15 @@ _nouveau_pwr_init(struct nouveau_object *object)
/* upload data segment */
nv_wr32(ppwr, 0x10a1c0, 0x01000000);
- for (i = 0; i < ppwr->data.size / 4; i++)
- nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]);
+ for (i = 0; i < impl->data.size / 4; i++)
+ nv_wr32(ppwr, 0x10a1c4, impl->data.data[i]);
/* upload code segment */
nv_wr32(ppwr, 0x10a180, 0x01000000);
- for (i = 0; i < ppwr->code.size / 4; i++) {
+ for (i = 0; i < impl->code.size / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(ppwr, 0x10a188, i >> 6);
- nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]);
+ nv_wr32(ppwr, 0x10a184, impl->code.data[i]);
}
/* start it running */
@@ -245,3 +256,15 @@ nouveau_pwr_create_(struct nouveau_object *parent,
init_waitqueue_head(&ppwr->recv.wait);
return 0;
}
+
+int
+_nouveau_pwr_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_pwr *ppwr;
+ int ret = nouveau_pwr_create(parent, engine, oclass, &ppwr);
+ *pobject = nv_object(ppwr);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
index e2a63ac5422b..5668e045bac1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -242,7 +242,7 @@
*/ push reg /*
*/ pop $r13 /*
*/ pop $r14 /*
-*/ call(wr32) /*
+*/ call(wr32)
#else
#define nv_wr32(addr,reg) /*
*/ sethi $r0 0x14000000 /*
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 39a5dc150a05..986495d533dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000046f,
- 0x00000461,
+ 0x00000464,
+ 0x00000456,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000473,
- 0x00000471,
+ 0x00000468,
+ 0x00000466,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000877,
- 0x0000071e,
+ 0x0000086c,
+ 0x00000713,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000898,
- 0x00000879,
+ 0x0000088d,
+ 0x0000086e,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x000008a3,
- 0x000008a1,
+ 0x00000898,
+ 0x00000896,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nv108_pwr_data[] = {
0x000003df,
0x00040003,
0x00000000,
- 0x00000407,
+ 0x000003fc,
0x00010004,
0x00000000,
- 0x00000421,
+ 0x00000416,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1080,375 +1080,375 @@ uint32_t nv108_pwr_code[] = {
0x50f960f9,
0xe0fcd0fc,
0x00002e7e,
- 0x140003f1,
- 0xa00506fd,
- 0xb604bd05,
- 0x1bf40242,
-/* 0x0407: memx_func_wait */
- 0x0800f8dd,
- 0x0088cf2c,
- 0x98001e98,
- 0x1c98011d,
- 0x031b9802,
- 0x7e1010b6,
- 0xf8000071,
-/* 0x0421: memx_func_delay */
+ 0xf40242b6,
+ 0x00f8e81b,
+/* 0x03fc: memx_func_wait */
+ 0x88cf2c08,
0x001e9800,
- 0x7e0410b6,
- 0xf800005d,
-/* 0x042d: memx_exec */
- 0xf9e0f900,
- 0xb2c1b2d0,
-/* 0x0435: memx_exec_next */
- 0x001398b2,
- 0x950410b6,
- 0x30f01034,
- 0xde35980c,
- 0x12a655f9,
- 0xfced1ef4,
- 0x7ee0fcd0,
- 0xf800023f,
-/* 0x0455: memx_info */
- 0x03ac4c00,
- 0x7e08004b,
- 0xf800023f,
-/* 0x0461: memx_recv */
- 0x01d6b000,
- 0xb0c90bf4,
- 0x0bf400d6,
-/* 0x046f: memx_init */
- 0xf800f8eb,
-/* 0x0471: perf_recv */
-/* 0x0473: perf_init */
- 0xf800f800,
-/* 0x0475: i2c_drive_scl */
- 0x0036b000,
- 0x400d0bf4,
- 0x01f607e0,
- 0xf804bd00,
-/* 0x0485: i2c_drive_scl_lo */
- 0x07e44000,
- 0xbd0001f6,
-/* 0x048f: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0x07e0400d,
- 0xbd0002f6,
-/* 0x049f: i2c_drive_sda_lo */
- 0x4000f804,
- 0x02f607e4,
- 0xf804bd00,
-/* 0x04a9: i2c_sense_scl */
- 0x0132f400,
- 0xcf07c443,
- 0x31fd0033,
- 0x060bf404,
-/* 0x04bb: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x04bd: i2c_sense_sda */
- 0x0132f400,
- 0xcf07c443,
- 0x32fd0033,
- 0x060bf404,
-/* 0x04cf: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x04d1: i2c_raise_scl */
- 0x4440f900,
- 0x01030898,
- 0x0004757e,
-/* 0x04dc: i2c_raise_scl_wait */
- 0x7e03e84e,
- 0x7e00005d,
- 0xf40004a9,
- 0x42b60901,
- 0xef1bf401,
-/* 0x04f0: i2c_raise_scl_done */
- 0x00f840fc,
-/* 0x04f4: i2c_start */
- 0x0004a97e,
- 0x7e0d11f4,
- 0xf40004bd,
- 0x0ef40611,
-/* 0x0505: i2c_start_rep */
- 0x7e00032e,
- 0x03000475,
- 0x048f7e01,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xd17e50fc,
- 0x64b60004,
- 0x1d11f404,
-/* 0x0530: i2c_start_send */
- 0x8f7e0003,
- 0x884e0004,
- 0x005d7e13,
- 0x7e000300,
- 0x4e000475,
- 0x5d7e1388,
-/* 0x054a: i2c_start_out */
- 0x00f80000,
-/* 0x054c: i2c_stop */
- 0x757e0003,
- 0x00030004,
- 0x00048f7e,
- 0x7e03e84e,
- 0x0300005d,
- 0x04757e01,
- 0x13884e00,
+ 0x98011d98,
+ 0x1b98021c,
+ 0x1010b603,
+ 0x0000717e,
+/* 0x0416: memx_func_delay */
+ 0x1e9800f8,
+ 0x0410b600,
0x00005d7e,
- 0x8f7e0103,
- 0x884e0004,
- 0x005d7e13,
-/* 0x057b: i2c_bitw */
- 0x7e00f800,
- 0x4e00048f,
- 0x5d7e03e8,
- 0x76bb0000,
+/* 0x0422: memx_exec */
+ 0xe0f900f8,
+ 0xc1b2d0f9,
+/* 0x042a: memx_exec_next */
+ 0x1398b2b2,
+ 0x0410b600,
+ 0xf0103495,
+ 0x35980c30,
+ 0xa655f9de,
+ 0xed1ef412,
+ 0xe0fcd0fc,
+ 0x00023f7e,
+/* 0x044a: memx_info */
+ 0xac4c00f8,
+ 0x08004b03,
+ 0x00023f7e,
+/* 0x0456: memx_recv */
+ 0xd6b000f8,
+ 0xc90bf401,
+ 0xf400d6b0,
+ 0x00f8eb0b,
+/* 0x0464: memx_init */
+/* 0x0466: perf_recv */
+ 0x00f800f8,
+/* 0x0468: perf_init */
+/* 0x046a: i2c_drive_scl */
+ 0x36b000f8,
+ 0x0d0bf400,
+ 0xf607e040,
+ 0x04bd0001,
+/* 0x047a: i2c_drive_scl_lo */
+ 0xe44000f8,
+ 0x0001f607,
+ 0x00f804bd,
+/* 0x0484: i2c_drive_sda */
+ 0xf40036b0,
+ 0xe0400d0b,
+ 0x0002f607,
+ 0x00f804bd,
+/* 0x0494: i2c_drive_sda_lo */
+ 0xf607e440,
+ 0x04bd0002,
+/* 0x049e: i2c_sense_scl */
+ 0x32f400f8,
+ 0x07c44301,
+ 0xfd0033cf,
+ 0x0bf40431,
+ 0x0131f406,
+/* 0x04b0: i2c_sense_scl_done */
+/* 0x04b2: i2c_sense_sda */
+ 0x32f400f8,
+ 0x07c44301,
+ 0xfd0033cf,
+ 0x0bf40432,
+ 0x0131f406,
+/* 0x04c4: i2c_sense_sda_done */
+/* 0x04c6: i2c_raise_scl */
+ 0x40f900f8,
+ 0x03089844,
+ 0x046a7e01,
+/* 0x04d1: i2c_raise_scl_wait */
+ 0x03e84e00,
+ 0x00005d7e,
+ 0x00049e7e,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x04e5: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x04e9: i2c_start */
+ 0x049e7e00,
+ 0x0d11f400,
+ 0x0004b27e,
+ 0xf40611f4,
+/* 0x04fa: i2c_start_rep */
+ 0x00032e0e,
+ 0x00046a7e,
+ 0x847e0103,
+ 0x76bb0004,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb60004d1,
+ 0xb60004c6,
0x11f40464,
- 0x13884e17,
+/* 0x0525: i2c_start_send */
+ 0x7e00031d,
+ 0x4e000484,
+ 0x5d7e1388,
+ 0x00030000,
+ 0x00046a7e,
+ 0x7e13884e,
+/* 0x053f: i2c_start_out */
+ 0xf800005d,
+/* 0x0541: i2c_stop */
+ 0x7e000300,
+ 0x0300046a,
+ 0x04847e00,
+ 0x03e84e00,
0x00005d7e,
- 0x757e0003,
+ 0x6a7e0103,
0x884e0004,
0x005d7e13,
-/* 0x05b9: i2c_bitw_out */
-/* 0x05bb: i2c_bitr */
- 0x0300f800,
- 0x048f7e01,
- 0x03e84e00,
- 0x00005d7e,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x04d17e50,
- 0x0464b600,
- 0x7e1a11f4,
- 0x030004bd,
- 0x04757e00,
- 0x13884e00,
- 0x00005d7e,
- 0xf4013cf0,
-/* 0x05fe: i2c_bitr_done */
- 0x00f80131,
-/* 0x0600: i2c_get_byte */
- 0x08040005,
-/* 0x0604: i2c_get_byte_next */
- 0xbb0154b6,
+ 0x7e010300,
+ 0x4e000484,
+ 0x5d7e1388,
+ 0x00f80000,
+/* 0x0570: i2c_bitw */
+ 0x0004847e,
+ 0x7e03e84e,
+ 0xbb00005d,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0005bb7e,
+ 0x0004c67e,
0xf40464b6,
- 0x53fd2a11,
- 0x0142b605,
- 0x03d81bf4,
- 0x0076bb01,
+ 0x884e1711,
+ 0x005d7e13,
+ 0x7e000300,
+ 0x4e00046a,
+ 0x5d7e1388,
+/* 0x05ae: i2c_bitw_out */
+ 0x00f80000,
+/* 0x05b0: i2c_bitr */
+ 0x847e0103,
+ 0xe84e0004,
+ 0x005d7e03,
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x7b7e50fc,
- 0x64b60005,
-/* 0x064d: i2c_get_byte_done */
-/* 0x064f: i2c_put_byte */
- 0x0400f804,
-/* 0x0651: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x00057b7e,
- 0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0xc67e50fc,
+ 0x64b60004,
+ 0x1a11f404,
+ 0x0004b27e,
+ 0x6a7e0003,
+ 0x884e0004,
+ 0x005d7e13,
+ 0x013cf000,
+/* 0x05f3: i2c_bitr_done */
+ 0xf80131f4,
+/* 0x05f5: i2c_get_byte */
+ 0x04000500,
+/* 0x05f9: i2c_get_byte_next */
+ 0x0154b608,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x05bb7e50,
+ 0x05b07e50,
0x0464b600,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x06a7: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x06a9: i2c_addr */
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xf47e50fc,
- 0x64b60004,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
+ 0xfd2a11f4,
+ 0x42b60553,
+ 0xd81bf401,
+ 0x76bb0103,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb600064f,
-/* 0x06ee: i2c_addr_done */
+ 0xb6000570,
+/* 0x0642: i2c_get_byte_done */
0x00f80464,
-/* 0x06f0: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b705e4,
- 0x00f8d014,
-/* 0x06fc: i2c_acquire */
- 0x0006f07e,
- 0x0000047e,
- 0x7e03d9f0,
- 0xf800002e,
-/* 0x070d: i2c_release */
- 0x06f07e00,
- 0x00047e00,
- 0x03daf000,
- 0x00002e7e,
-/* 0x071e: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13b80137,
- 0x98000bd4,
- 0x13b80032,
- 0x98000bac,
- 0x31f40031,
- 0xf9d0f902,
- 0xf1d0f9e0,
- 0xf1000067,
- 0x92100063,
- 0x76bb0167,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb60006fc,
- 0xd0fc0464,
- 0xf500d6b0,
- 0x0500b01b,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xa97e50fc,
- 0x64b60006,
- 0xcc11f504,
- 0xe0c5c700,
+/* 0x0644: i2c_put_byte */
+/* 0x0646: i2c_put_byte_next */
+ 0x42b60804,
+ 0x3854ff01,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x064f7e50,
+ 0x05707e50,
0x0464b600,
- 0x00a911f5,
- 0x76bb0105,
+ 0xb03411f4,
+ 0x1bf40046,
+ 0x0076bbd8,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xb07e50fc,
+ 0x64b60005,
+ 0x0f11f404,
+ 0xb00076bb,
+ 0x1bf40136,
+ 0x0132f406,
+/* 0x069c: i2c_put_byte_done */
+/* 0x069e: i2c_addr */
+ 0x76bb00f8,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb60006a9,
- 0x11f50464,
- 0x76bb0087,
+ 0xb60004e9,
+ 0x11f40464,
+ 0x2ec3e729,
+ 0x0134b601,
+ 0xbb0553fd,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0006447e,
+/* 0x06e3: i2c_addr_done */
+ 0xf80464b6,
+/* 0x06e5: i2c_acquire_addr */
+ 0xf8cec700,
+ 0xb705e4b6,
+ 0xf8d014e0,
+/* 0x06f1: i2c_acquire */
+ 0x06e57e00,
+ 0x00047e00,
+ 0x03d9f000,
+ 0x00002e7e,
+/* 0x0702: i2c_release */
+ 0xe57e00f8,
+ 0x047e0006,
+ 0xdaf00000,
+ 0x002e7e03,
+/* 0x0713: i2c_recv */
+ 0xf400f800,
+ 0xc1c70132,
+ 0x0214b6f8,
+ 0xf52816b0,
+ 0xb801371f,
+ 0x000bd413,
+ 0xb8003298,
+ 0x000bac13,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0006f17e,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b01bf5,
+ 0x76bb0005,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000600,
- 0x11f40464,
- 0xe05bcb67,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x054c7e50,
- 0x0464b600,
- 0x74bd5bb2,
-/* 0x0823: i2c_recv_not_rd08 */
- 0xb0410ef4,
- 0x1bf401d6,
- 0x7e00053b,
- 0xf40006a9,
- 0xc5c73211,
- 0x064f7ee0,
- 0x2811f400,
- 0xa97e0005,
+ 0xb600069e,
+ 0x11f50464,
+ 0xc5c700cc,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x447e50fc,
+ 0x64b60006,
+ 0xa911f504,
+ 0xbb010500,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x00069e7e,
+ 0xf50464b6,
+ 0xbb008711,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0005f57e,
+ 0xf40464b6,
+ 0x5bcb6711,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x417e50fc,
+ 0x64b60005,
+ 0xbd5bb204,
+ 0x410ef474,
+/* 0x0818: i2c_recv_not_rd08 */
+ 0xf401d6b0,
+ 0x00053b1b,
+ 0x00069e7e,
+ 0xc73211f4,
+ 0x447ee0c5,
0x11f40006,
- 0xe0b5c71f,
- 0x00064f7e,
- 0x7e1511f4,
- 0xbd00054c,
- 0x08c5c774,
- 0xf4091bf4,
- 0x0ef40232,
-/* 0x0861: i2c_recv_not_wr08 */
-/* 0x0861: i2c_recv_done */
- 0xf8cec703,
- 0x00070d7e,
- 0xd0fce0fc,
- 0xb20912f4,
- 0x023f7e7c,
-/* 0x0875: i2c_recv_exit */
-/* 0x0877: i2c_init */
- 0xf800f800,
-/* 0x0879: test_recv */
- 0x04584100,
- 0xb60011cf,
- 0x58400110,
- 0x0001f604,
- 0xe7f104bd,
- 0xe3f1d900,
- 0x967e134f,
- 0x00f80001,
-/* 0x0898: test_init */
- 0x7e08004e,
- 0xf8000196,
-/* 0x08a1: idle_recv */
-/* 0x08a3: idle */
- 0xf400f800,
- 0x54410031,
+ 0x7e000528,
+ 0xf400069e,
+ 0xb5c71f11,
+ 0x06447ee0,
+ 0x1511f400,
+ 0x0005417e,
+ 0xc5c774bd,
+ 0x091bf408,
+ 0xf40232f4,
+/* 0x0856: i2c_recv_not_wr08 */
+/* 0x0856: i2c_recv_done */
+ 0xcec7030e,
+ 0x07027ef8,
+ 0xfce0fc00,
+ 0x0912f4d0,
+ 0x3f7e7cb2,
+/* 0x086a: i2c_recv_exit */
+ 0x00f80002,
+/* 0x086c: i2c_init */
+/* 0x086e: test_recv */
+ 0x584100f8,
0x0011cf04,
0x400110b6,
- 0x01f60454,
-/* 0x08b7: idle_loop */
- 0x0104bd00,
- 0x0232f458,
-/* 0x08bc: idle_proc */
-/* 0x08bc: idle_proc_exec */
- 0x1eb210f9,
- 0x0002487e,
- 0x11f410fc,
- 0x0231f409,
-/* 0x08cf: idle_proc_next */
- 0xb6f00ef4,
- 0x1fa65810,
- 0xf4e81bf4,
- 0x28f4e002,
- 0xc60ef400,
+ 0x01f60458,
+ 0xf104bd00,
+ 0xf1d900e7,
+ 0x7e134fe3,
+ 0xf8000196,
+/* 0x088d: test_init */
+ 0x08004e00,
+ 0x0001967e,
+/* 0x0896: idle_recv */
+ 0x00f800f8,
+/* 0x0898: idle */
+ 0x410031f4,
+ 0x11cf0454,
+ 0x0110b600,
+ 0xf6045440,
+ 0x04bd0001,
+/* 0x08ac: idle_loop */
+ 0x32f45801,
+/* 0x08b1: idle_proc */
+/* 0x08b1: idle_proc_exec */
+ 0xb210f902,
+ 0x02487e1e,
+ 0xf410fc00,
+ 0x31f40911,
+ 0xf00ef402,
+/* 0x08c4: idle_proc_next */
+ 0xa65810b6,
+ 0xe81bf41f,
+ 0xf4e002f4,
+ 0x0ef40028,
+ 0x000000c6,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 254205cd5166..e087ce3041be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000054e,
- 0x00000540,
+ 0x00000542,
+ 0x00000534,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000552,
- 0x00000550,
+ 0x00000546,
+ 0x00000544,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000982,
- 0x00000825,
+ 0x00000976,
+ 0x00000819,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x000009ab,
- 0x00000984,
+ 0x0000099f,
+ 0x00000978,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x000009b7,
- 0x000009b5,
+ 0x000009ab,
+ 0x000009a9,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nva3_pwr_data[] = {
0x000004b7,
0x00040003,
0x00000000,
- 0x000004df,
+ 0x000004d3,
0x00010004,
0x00000000,
- 0x000004fc,
+ 0x000004f0,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1198,13 +1198,10 @@ uint32_t nva3_pwr_code[] = {
0x0810b601,
0x50f960f9,
0xe0fcd0fc,
- 0xf13f21f4,
- 0xfd140003,
- 0x05800506,
- 0xb604bd00,
+ 0xb63f21f4,
0x1bf40242,
-/* 0x04df: memx_func_wait */
- 0xf000f8dd,
+/* 0x04d3: memx_func_wait */
+ 0xf000f8e9,
0x84b62c87,
0x0088cf06,
0x98001e98,
@@ -1212,14 +1209,14 @@ uint32_t nva3_pwr_code[] = {
0x031b9802,
0xf41010b6,
0x00f89c21,
-/* 0x04fc: memx_func_delay */
+/* 0x04f0: memx_func_delay */
0xb6001e98,
0x21f40410,
-/* 0x0507: memx_exec */
+/* 0x04fb: memx_exec */
0xf900f87f,
0xb9d0f9e0,
0xb2b902c1,
-/* 0x0511: memx_exec_next */
+/* 0x0505: memx_exec_next */
0x00139802,
0x950410b6,
0x30f01034,
@@ -1228,112 +1225,112 @@ uint32_t nva3_pwr_code[] = {
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
-/* 0x0532: memx_info */
+/* 0x0526: memx_info */
0xc7f100f8,
0xb7f103ac,
0x21f50800,
0x00f802b9,
-/* 0x0540: memx_recv */
+/* 0x0534: memx_recv */
0xf401d6b0,
0xd6b0c40b,
0xe90bf400,
-/* 0x054e: memx_init */
+/* 0x0542: memx_init */
0x00f800f8,
-/* 0x0550: perf_recv */
-/* 0x0552: perf_init */
+/* 0x0544: perf_recv */
+/* 0x0546: perf_init */
0x00f800f8,
-/* 0x0554: i2c_drive_scl */
+/* 0x0548: i2c_drive_scl */
0xf40036b0,
0x07f1110b,
0x04b607e0,
0x0001d006,
0x00f804bd,
-/* 0x0568: i2c_drive_scl_lo */
+/* 0x055c: i2c_drive_scl_lo */
0x07e407f1,
0xd00604b6,
0x04bd0001,
-/* 0x0576: i2c_drive_sda */
+/* 0x056a: i2c_drive_sda */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
0x04bd0002,
-/* 0x058a: i2c_drive_sda_lo */
+/* 0x057e: i2c_drive_sda_lo */
0x07f100f8,
0x04b607e4,
0x0002d006,
0x00f804bd,
-/* 0x0598: i2c_sense_scl */
+/* 0x058c: i2c_sense_scl */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0431fd00,
0xf4060bf4,
-/* 0x05ae: i2c_sense_scl_done */
+/* 0x05a2: i2c_sense_scl_done */
0x00f80131,
-/* 0x05b0: i2c_sense_sda */
+/* 0x05a4: i2c_sense_sda */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0432fd00,
0xf4060bf4,
-/* 0x05c6: i2c_sense_sda_done */
+/* 0x05ba: i2c_sense_sda_done */
0x00f80131,
-/* 0x05c8: i2c_raise_scl */
+/* 0x05bc: i2c_raise_scl */
0x47f140f9,
0x37f00898,
- 0x5421f501,
-/* 0x05d5: i2c_raise_scl_wait */
+ 0x4821f501,
+/* 0x05c9: i2c_raise_scl_wait */
0xe8e7f105,
0x7f21f403,
- 0x059821f5,
+ 0x058c21f5,
0xb60901f4,
0x1bf40142,
-/* 0x05e9: i2c_raise_scl_done */
+/* 0x05dd: i2c_raise_scl_done */
0xf840fcef,
-/* 0x05ed: i2c_start */
- 0x9821f500,
+/* 0x05e1: i2c_start */
+ 0x8c21f500,
0x0d11f405,
- 0x05b021f5,
+ 0x05a421f5,
0xf40611f4,
-/* 0x05fe: i2c_start_rep */
+/* 0x05f2: i2c_start_rep */
0x37f0300e,
- 0x5421f500,
+ 0x4821f500,
0x0137f005,
- 0x057621f5,
+ 0x056a21f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xc821f550,
+ 0xbc21f550,
0x0464b605,
-/* 0x062b: i2c_start_send */
+/* 0x061f: i2c_start_send */
0xf01f11f4,
0x21f50037,
- 0xe7f10576,
+ 0xe7f1056a,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x0647: i2c_start_out */
+/* 0x063b: i2c_start_out */
0xf87f21f4,
-/* 0x0649: i2c_stop */
+/* 0x063d: i2c_stop */
0x0037f000,
- 0x055421f5,
+ 0x054821f5,
0xf50037f0,
- 0xf1057621,
+ 0xf1056a21,
0xf403e8e7,
0x37f07f21,
- 0x5421f501,
+ 0x4821f501,
0x88e7f105,
0x7f21f413,
0xf50137f0,
- 0xf1057621,
+ 0xf1056a21,
0xf41388e7,
0x00f87f21,
-/* 0x067c: i2c_bitw */
- 0x057621f5,
+/* 0x0670: i2c_bitw */
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1341,18 +1338,18 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0xe7f11811,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x06bb: i2c_bitw_out */
+/* 0x06af: i2c_bitw_out */
0xf87f21f4,
-/* 0x06bd: i2c_bitr */
+/* 0x06b1: i2c_bitr */
0x0137f000,
- 0x057621f5,
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1360,19 +1357,19 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0x21f51b11,
- 0x37f005b0,
- 0x5421f500,
+ 0x37f005a4,
+ 0x4821f500,
0x88e7f105,
0x7f21f413,
0xf4013cf0,
-/* 0x0702: i2c_bitr_done */
+/* 0x06f6: i2c_bitr_done */
0x00f80131,
-/* 0x0704: i2c_get_byte */
+/* 0x06f8: i2c_get_byte */
0xf00057f0,
-/* 0x070a: i2c_get_byte_next */
+/* 0x06fe: i2c_get_byte_next */
0x54b60847,
0x0076bb01,
0xf90465b6,
@@ -1380,7 +1377,7 @@ uint32_t nva3_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b606bd,
+ 0x64b606b1,
0x2b11f404,
0xb60553fd,
0x1bf40142,
@@ -1390,12 +1387,12 @@ uint32_t nva3_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x7c21f550,
+ 0x7021f550,
0x0464b606,
-/* 0x0754: i2c_get_byte_done */
-/* 0x0756: i2c_put_byte */
+/* 0x0748: i2c_get_byte_done */
+/* 0x074a: i2c_put_byte */
0x47f000f8,
-/* 0x0759: i2c_put_byte_next */
+/* 0x074d: i2c_put_byte_next */
0x0142b608,
0xbb3854ff,
0x65b60076,
@@ -1403,7 +1400,7 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x067c21f5,
+ 0x067021f5,
0xf40464b6,
0x46b03411,
0xd81bf400,
@@ -1412,21 +1409,21 @@ uint32_t nva3_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xbd21f550,
+ 0xb121f550,
0x0464b606,
0xbb0f11f4,
0x36b00076,
0x061bf401,
-/* 0x07af: i2c_put_byte_done */
+/* 0x07a3: i2c_put_byte_done */
0xf80132f4,
-/* 0x07b1: i2c_addr */
+/* 0x07a5: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b605ed,
+ 0x64b605e1,
0x2911f404,
0x012ec3e7,
0xfd0134b6,
@@ -1436,24 +1433,24 @@ uint32_t nva3_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
-/* 0x07f6: i2c_addr_done */
+ 0xb6074a21,
+/* 0x07ea: i2c_addr_done */
0x00f80464,
-/* 0x07f8: i2c_acquire_addr */
+/* 0x07ec: i2c_acquire_addr */
0xb6f8cec7,
0xe0b702e4,
0xee980bfc,
-/* 0x0807: i2c_acquire */
+/* 0x07fb: i2c_acquire */
0xf500f800,
- 0xf407f821,
+ 0xf407ec21,
0xd9f00421,
0x3f21f403,
-/* 0x0816: i2c_release */
+/* 0x080a: i2c_release */
0x21f500f8,
- 0x21f407f8,
+ 0x21f407ec,
0x03daf004,
0xf83f21f4,
-/* 0x0825: i2c_recv */
+/* 0x0819: i2c_recv */
0x0132f400,
0xb6f8c1c7,
0x16b00214,
@@ -1472,7 +1469,7 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x080721f5,
+ 0x07fb21f5,
0xfc0464b6,
0x00d6b0d0,
0x00b31bf5,
@@ -1482,7 +1479,7 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x07b121f5,
+ 0x07a521f5,
0xf50464b6,
0xc700d011,
0x76bbe0c5,
@@ -1491,7 +1488,7 @@ uint32_t nva3_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
+ 0xb6074a21,
0x11f50464,
0x57f000ad,
0x0076bb01,
@@ -1500,7 +1497,7 @@ uint32_t nva3_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607b1,
+ 0x64b607a5,
0x8a11f504,
0x0076bb00,
0xf90465b6,
@@ -1508,7 +1505,7 @@ uint32_t nva3_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60704,
+ 0x64b606f8,
0x6a11f404,
0xbbe05bcb,
0x65b60076,
@@ -1516,38 +1513,38 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x064921f5,
+ 0x063d21f5,
0xb90464b6,
0x74bd025b,
-/* 0x092b: i2c_recv_not_rd08 */
+/* 0x091f: i2c_recv_not_rd08 */
0xb0430ef4,
0x1bf401d6,
0x0057f03d,
- 0x07b121f5,
+ 0x07a521f5,
0xc73311f4,
0x21f5e0c5,
- 0x11f40756,
+ 0x11f4074a,
0x0057f029,
- 0x07b121f5,
+ 0x07a521f5,
0xc71f11f4,
0x21f5e0b5,
- 0x11f40756,
- 0x4921f515,
+ 0x11f4074a,
+ 0x3d21f515,
0xc774bd06,
0x1bf408c5,
0x0232f409,
-/* 0x096b: i2c_recv_not_wr08 */
-/* 0x096b: i2c_recv_done */
+/* 0x095f: i2c_recv_not_wr08 */
+/* 0x095f: i2c_recv_done */
0xc7030ef4,
0x21f5f8ce,
- 0xe0fc0816,
+ 0xe0fc080a,
0x12f4d0fc,
0x027cb90a,
0x02b921f5,
-/* 0x0980: i2c_recv_exit */
-/* 0x0982: i2c_init */
+/* 0x0974: i2c_recv_exit */
+/* 0x0976: i2c_init */
0x00f800f8,
-/* 0x0984: test_recv */
+/* 0x0978: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1557,12 +1554,12 @@ uint32_t nva3_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x09ab: test_init */
+/* 0x099f: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x09b5: idle_recv */
-/* 0x09b7: idle */
+/* 0x09a9: idle_recv */
+/* 0x09ab: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1570,20 +1567,23 @@ uint32_t nva3_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x09d3: idle_loop */
+/* 0x09c7: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x09d9: idle_proc */
-/* 0x09d9: idle_proc_exec */
+/* 0x09cd: idle_proc */
+/* 0x09cd: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x09ed: idle_proc_next */
+/* 0x09e1: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 7ac87405d01b..0773ff0e3dc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000054e,
- 0x00000540,
+ 0x00000542,
+ 0x00000534,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000552,
- 0x00000550,
+ 0x00000546,
+ 0x00000544,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000982,
- 0x00000825,
+ 0x00000976,
+ 0x00000819,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x000009ab,
- 0x00000984,
+ 0x0000099f,
+ 0x00000978,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x000009b7,
- 0x000009b5,
+ 0x000009ab,
+ 0x000009a9,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nvc0_pwr_data[] = {
0x000004b7,
0x00040003,
0x00000000,
- 0x000004df,
+ 0x000004d3,
0x00010004,
0x00000000,
- 0x000004fc,
+ 0x000004f0,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1198,13 +1198,10 @@ uint32_t nvc0_pwr_code[] = {
0x0810b601,
0x50f960f9,
0xe0fcd0fc,
- 0xf13f21f4,
- 0xfd140003,
- 0x05800506,
- 0xb604bd00,
+ 0xb63f21f4,
0x1bf40242,
-/* 0x04df: memx_func_wait */
- 0xf000f8dd,
+/* 0x04d3: memx_func_wait */
+ 0xf000f8e9,
0x84b62c87,
0x0088cf06,
0x98001e98,
@@ -1212,14 +1209,14 @@ uint32_t nvc0_pwr_code[] = {
0x031b9802,
0xf41010b6,
0x00f89c21,
-/* 0x04fc: memx_func_delay */
+/* 0x04f0: memx_func_delay */
0xb6001e98,
0x21f40410,
-/* 0x0507: memx_exec */
+/* 0x04fb: memx_exec */
0xf900f87f,
0xb9d0f9e0,
0xb2b902c1,
-/* 0x0511: memx_exec_next */
+/* 0x0505: memx_exec_next */
0x00139802,
0x950410b6,
0x30f01034,
@@ -1228,112 +1225,112 @@ uint32_t nvc0_pwr_code[] = {
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
-/* 0x0532: memx_info */
+/* 0x0526: memx_info */
0xc7f100f8,
0xb7f103ac,
0x21f50800,
0x00f802b9,
-/* 0x0540: memx_recv */
+/* 0x0534: memx_recv */
0xf401d6b0,
0xd6b0c40b,
0xe90bf400,
-/* 0x054e: memx_init */
+/* 0x0542: memx_init */
0x00f800f8,
-/* 0x0550: perf_recv */
-/* 0x0552: perf_init */
+/* 0x0544: perf_recv */
+/* 0x0546: perf_init */
0x00f800f8,
-/* 0x0554: i2c_drive_scl */
+/* 0x0548: i2c_drive_scl */
0xf40036b0,
0x07f1110b,
0x04b607e0,
0x0001d006,
0x00f804bd,
-/* 0x0568: i2c_drive_scl_lo */
+/* 0x055c: i2c_drive_scl_lo */
0x07e407f1,
0xd00604b6,
0x04bd0001,
-/* 0x0576: i2c_drive_sda */
+/* 0x056a: i2c_drive_sda */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
0x04bd0002,
-/* 0x058a: i2c_drive_sda_lo */
+/* 0x057e: i2c_drive_sda_lo */
0x07f100f8,
0x04b607e4,
0x0002d006,
0x00f804bd,
-/* 0x0598: i2c_sense_scl */
+/* 0x058c: i2c_sense_scl */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0431fd00,
0xf4060bf4,
-/* 0x05ae: i2c_sense_scl_done */
+/* 0x05a2: i2c_sense_scl_done */
0x00f80131,
-/* 0x05b0: i2c_sense_sda */
+/* 0x05a4: i2c_sense_sda */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0432fd00,
0xf4060bf4,
-/* 0x05c6: i2c_sense_sda_done */
+/* 0x05ba: i2c_sense_sda_done */
0x00f80131,
-/* 0x05c8: i2c_raise_scl */
+/* 0x05bc: i2c_raise_scl */
0x47f140f9,
0x37f00898,
- 0x5421f501,
-/* 0x05d5: i2c_raise_scl_wait */
+ 0x4821f501,
+/* 0x05c9: i2c_raise_scl_wait */
0xe8e7f105,
0x7f21f403,
- 0x059821f5,
+ 0x058c21f5,
0xb60901f4,
0x1bf40142,
-/* 0x05e9: i2c_raise_scl_done */
+/* 0x05dd: i2c_raise_scl_done */
0xf840fcef,
-/* 0x05ed: i2c_start */
- 0x9821f500,
+/* 0x05e1: i2c_start */
+ 0x8c21f500,
0x0d11f405,
- 0x05b021f5,
+ 0x05a421f5,
0xf40611f4,
-/* 0x05fe: i2c_start_rep */
+/* 0x05f2: i2c_start_rep */
0x37f0300e,
- 0x5421f500,
+ 0x4821f500,
0x0137f005,
- 0x057621f5,
+ 0x056a21f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xc821f550,
+ 0xbc21f550,
0x0464b605,
-/* 0x062b: i2c_start_send */
+/* 0x061f: i2c_start_send */
0xf01f11f4,
0x21f50037,
- 0xe7f10576,
+ 0xe7f1056a,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x0647: i2c_start_out */
+/* 0x063b: i2c_start_out */
0xf87f21f4,
-/* 0x0649: i2c_stop */
+/* 0x063d: i2c_stop */
0x0037f000,
- 0x055421f5,
+ 0x054821f5,
0xf50037f0,
- 0xf1057621,
+ 0xf1056a21,
0xf403e8e7,
0x37f07f21,
- 0x5421f501,
+ 0x4821f501,
0x88e7f105,
0x7f21f413,
0xf50137f0,
- 0xf1057621,
+ 0xf1056a21,
0xf41388e7,
0x00f87f21,
-/* 0x067c: i2c_bitw */
- 0x057621f5,
+/* 0x0670: i2c_bitw */
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1341,18 +1338,18 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0xe7f11811,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x06bb: i2c_bitw_out */
+/* 0x06af: i2c_bitw_out */
0xf87f21f4,
-/* 0x06bd: i2c_bitr */
+/* 0x06b1: i2c_bitr */
0x0137f000,
- 0x057621f5,
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1360,19 +1357,19 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0x21f51b11,
- 0x37f005b0,
- 0x5421f500,
+ 0x37f005a4,
+ 0x4821f500,
0x88e7f105,
0x7f21f413,
0xf4013cf0,
-/* 0x0702: i2c_bitr_done */
+/* 0x06f6: i2c_bitr_done */
0x00f80131,
-/* 0x0704: i2c_get_byte */
+/* 0x06f8: i2c_get_byte */
0xf00057f0,
-/* 0x070a: i2c_get_byte_next */
+/* 0x06fe: i2c_get_byte_next */
0x54b60847,
0x0076bb01,
0xf90465b6,
@@ -1380,7 +1377,7 @@ uint32_t nvc0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b606bd,
+ 0x64b606b1,
0x2b11f404,
0xb60553fd,
0x1bf40142,
@@ -1390,12 +1387,12 @@ uint32_t nvc0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x7c21f550,
+ 0x7021f550,
0x0464b606,
-/* 0x0754: i2c_get_byte_done */
-/* 0x0756: i2c_put_byte */
+/* 0x0748: i2c_get_byte_done */
+/* 0x074a: i2c_put_byte */
0x47f000f8,
-/* 0x0759: i2c_put_byte_next */
+/* 0x074d: i2c_put_byte_next */
0x0142b608,
0xbb3854ff,
0x65b60076,
@@ -1403,7 +1400,7 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x067c21f5,
+ 0x067021f5,
0xf40464b6,
0x46b03411,
0xd81bf400,
@@ -1412,21 +1409,21 @@ uint32_t nvc0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xbd21f550,
+ 0xb121f550,
0x0464b606,
0xbb0f11f4,
0x36b00076,
0x061bf401,
-/* 0x07af: i2c_put_byte_done */
+/* 0x07a3: i2c_put_byte_done */
0xf80132f4,
-/* 0x07b1: i2c_addr */
+/* 0x07a5: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b605ed,
+ 0x64b605e1,
0x2911f404,
0x012ec3e7,
0xfd0134b6,
@@ -1436,24 +1433,24 @@ uint32_t nvc0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
-/* 0x07f6: i2c_addr_done */
+ 0xb6074a21,
+/* 0x07ea: i2c_addr_done */
0x00f80464,
-/* 0x07f8: i2c_acquire_addr */
+/* 0x07ec: i2c_acquire_addr */
0xb6f8cec7,
0xe0b702e4,
0xee980bfc,
-/* 0x0807: i2c_acquire */
+/* 0x07fb: i2c_acquire */
0xf500f800,
- 0xf407f821,
+ 0xf407ec21,
0xd9f00421,
0x3f21f403,
-/* 0x0816: i2c_release */
+/* 0x080a: i2c_release */
0x21f500f8,
- 0x21f407f8,
+ 0x21f407ec,
0x03daf004,
0xf83f21f4,
-/* 0x0825: i2c_recv */
+/* 0x0819: i2c_recv */
0x0132f400,
0xb6f8c1c7,
0x16b00214,
@@ -1472,7 +1469,7 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x080721f5,
+ 0x07fb21f5,
0xfc0464b6,
0x00d6b0d0,
0x00b31bf5,
@@ -1482,7 +1479,7 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x07b121f5,
+ 0x07a521f5,
0xf50464b6,
0xc700d011,
0x76bbe0c5,
@@ -1491,7 +1488,7 @@ uint32_t nvc0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
+ 0xb6074a21,
0x11f50464,
0x57f000ad,
0x0076bb01,
@@ -1500,7 +1497,7 @@ uint32_t nvc0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607b1,
+ 0x64b607a5,
0x8a11f504,
0x0076bb00,
0xf90465b6,
@@ -1508,7 +1505,7 @@ uint32_t nvc0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60704,
+ 0x64b606f8,
0x6a11f404,
0xbbe05bcb,
0x65b60076,
@@ -1516,38 +1513,38 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x064921f5,
+ 0x063d21f5,
0xb90464b6,
0x74bd025b,
-/* 0x092b: i2c_recv_not_rd08 */
+/* 0x091f: i2c_recv_not_rd08 */
0xb0430ef4,
0x1bf401d6,
0x0057f03d,
- 0x07b121f5,
+ 0x07a521f5,
0xc73311f4,
0x21f5e0c5,
- 0x11f40756,
+ 0x11f4074a,
0x0057f029,
- 0x07b121f5,
+ 0x07a521f5,
0xc71f11f4,
0x21f5e0b5,
- 0x11f40756,
- 0x4921f515,
+ 0x11f4074a,
+ 0x3d21f515,
0xc774bd06,
0x1bf408c5,
0x0232f409,
-/* 0x096b: i2c_recv_not_wr08 */
-/* 0x096b: i2c_recv_done */
+/* 0x095f: i2c_recv_not_wr08 */
+/* 0x095f: i2c_recv_done */
0xc7030ef4,
0x21f5f8ce,
- 0xe0fc0816,
+ 0xe0fc080a,
0x12f4d0fc,
0x027cb90a,
0x02b921f5,
-/* 0x0980: i2c_recv_exit */
-/* 0x0982: i2c_init */
+/* 0x0974: i2c_recv_exit */
+/* 0x0976: i2c_init */
0x00f800f8,
-/* 0x0984: test_recv */
+/* 0x0978: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1557,12 +1554,12 @@ uint32_t nvc0_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x09ab: test_init */
+/* 0x099f: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x09b5: idle_recv */
-/* 0x09b7: idle */
+/* 0x09a9: idle_recv */
+/* 0x09ab: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1570,20 +1567,23 @@ uint32_t nvc0_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x09d3: idle_loop */
+/* 0x09c7: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x09d9: idle_proc */
-/* 0x09d9: idle_proc_exec */
+/* 0x09cd: idle_proc */
+/* 0x09cd: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x09ed: idle_proc_next */
+/* 0x09e1: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index cd9ff1a73284..8d369b3faaba 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x000004c4,
- 0x000004b6,
+ 0x000004b8,
+ 0x000004aa,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x000004c8,
- 0x000004c6,
+ 0x000004bc,
+ 0x000004ba,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x000008e3,
- 0x00000786,
+ 0x000008d7,
+ 0x0000077a,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000906,
- 0x000008e5,
+ 0x000008fa,
+ 0x000008d9,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000912,
- 0x00000910,
+ 0x00000906,
+ 0x00000904,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nvd0_pwr_data[] = {
0x00000430,
0x00040003,
0x00000000,
- 0x00000458,
+ 0x0000044c,
0x00010004,
0x00000000,
- 0x00000472,
+ 0x00000466,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1100,26 +1100,23 @@ uint32_t nvd0_pwr_code[] = {
0xf960f908,
0xfcd0fc50,
0x3321f4e0,
- 0x140003f1,
- 0x800506fd,
- 0x04bd0005,
0xf40242b6,
- 0x00f8dd1b,
-/* 0x0458: memx_func_wait */
+ 0x00f8e91b,
+/* 0x044c: memx_func_wait */
0xcf2c87f0,
0x1e980088,
0x011d9800,
0x98021c98,
0x10b6031b,
0x7e21f410,
-/* 0x0472: memx_func_delay */
+/* 0x0466: memx_func_delay */
0x1e9800f8,
0x0410b600,
0xf86721f4,
-/* 0x047d: memx_exec */
+/* 0x0471: memx_exec */
0xf9e0f900,
0x02c1b9d0,
-/* 0x0487: memx_exec_next */
+/* 0x047b: memx_exec_next */
0x9802b2b9,
0x10b60013,
0x10349504,
@@ -1129,107 +1126,107 @@ uint32_t nvd0_pwr_code[] = {
0xd0fcec1e,
0x21f5e0fc,
0x00f8026b,
-/* 0x04a8: memx_info */
+/* 0x049c: memx_info */
0x03acc7f1,
0x0800b7f1,
0x026b21f5,
-/* 0x04b6: memx_recv */
+/* 0x04aa: memx_recv */
0xd6b000f8,
0xc40bf401,
0xf400d6b0,
0x00f8e90b,
-/* 0x04c4: memx_init */
-/* 0x04c6: perf_recv */
+/* 0x04b8: memx_init */
+/* 0x04ba: perf_recv */
0x00f800f8,
-/* 0x04c8: perf_init */
-/* 0x04ca: i2c_drive_scl */
+/* 0x04bc: perf_init */
+/* 0x04be: i2c_drive_scl */
0x36b000f8,
0x0e0bf400,
0x07e007f1,
0xbd0001d0,
-/* 0x04db: i2c_drive_scl_lo */
+/* 0x04cf: i2c_drive_scl_lo */
0xf100f804,
0xd007e407,
0x04bd0001,
-/* 0x04e6: i2c_drive_sda */
+/* 0x04da: i2c_drive_sda */
0x36b000f8,
0x0e0bf400,
0x07e007f1,
0xbd0002d0,
-/* 0x04f7: i2c_drive_sda_lo */
+/* 0x04eb: i2c_drive_sda_lo */
0xf100f804,
0xd007e407,
0x04bd0002,
-/* 0x0502: i2c_sense_scl */
+/* 0x04f6: i2c_sense_scl */
0x32f400f8,
0xc437f101,
0x0033cf07,
0xf40431fd,
0x31f4060b,
-/* 0x0515: i2c_sense_scl_done */
-/* 0x0517: i2c_sense_sda */
+/* 0x0509: i2c_sense_scl_done */
+/* 0x050b: i2c_sense_sda */
0xf400f801,
0x37f10132,
0x33cf07c4,
0x0432fd00,
0xf4060bf4,
-/* 0x052a: i2c_sense_sda_done */
+/* 0x051e: i2c_sense_sda_done */
0x00f80131,
-/* 0x052c: i2c_raise_scl */
+/* 0x0520: i2c_raise_scl */
0x47f140f9,
0x37f00898,
- 0xca21f501,
-/* 0x0539: i2c_raise_scl_wait */
+ 0xbe21f501,
+/* 0x052d: i2c_raise_scl_wait */
0xe8e7f104,
0x6721f403,
- 0x050221f5,
+ 0x04f621f5,
0xb60901f4,
0x1bf40142,
-/* 0x054d: i2c_raise_scl_done */
+/* 0x0541: i2c_raise_scl_done */
0xf840fcef,
-/* 0x0551: i2c_start */
- 0x0221f500,
- 0x0d11f405,
- 0x051721f5,
+/* 0x0545: i2c_start */
+ 0xf621f500,
+ 0x0d11f404,
+ 0x050b21f5,
0xf40611f4,
-/* 0x0562: i2c_start_rep */
+/* 0x0556: i2c_start_rep */
0x37f0300e,
- 0xca21f500,
+ 0xbe21f500,
0x0137f004,
- 0x04e621f5,
+ 0x04da21f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x2c21f550,
+ 0x2021f550,
0x0464b605,
-/* 0x058f: i2c_start_send */
+/* 0x0583: i2c_start_send */
0xf01f11f4,
0x21f50037,
- 0xe7f104e6,
+ 0xe7f104da,
0x21f41388,
0x0037f067,
- 0x04ca21f5,
+ 0x04be21f5,
0x1388e7f1,
-/* 0x05ab: i2c_start_out */
+/* 0x059f: i2c_start_out */
0xf86721f4,
-/* 0x05ad: i2c_stop */
+/* 0x05a1: i2c_stop */
0x0037f000,
- 0x04ca21f5,
+ 0x04be21f5,
0xf50037f0,
- 0xf104e621,
+ 0xf104da21,
0xf403e8e7,
0x37f06721,
- 0xca21f501,
+ 0xbe21f501,
0x88e7f104,
0x6721f413,
0xf50137f0,
- 0xf104e621,
+ 0xf104da21,
0xf41388e7,
0x00f86721,
-/* 0x05e0: i2c_bitw */
- 0x04e621f5,
+/* 0x05d4: i2c_bitw */
+ 0x04da21f5,
0x03e8e7f1,
0xbb6721f4,
0x65b60076,
@@ -1237,18 +1234,18 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x052c21f5,
+ 0x052021f5,
0xf40464b6,
0xe7f11811,
0x21f41388,
0x0037f067,
- 0x04ca21f5,
+ 0x04be21f5,
0x1388e7f1,
-/* 0x061f: i2c_bitw_out */
+/* 0x0613: i2c_bitw_out */
0xf86721f4,
-/* 0x0621: i2c_bitr */
+/* 0x0615: i2c_bitr */
0x0137f000,
- 0x04e621f5,
+ 0x04da21f5,
0x03e8e7f1,
0xbb6721f4,
0x65b60076,
@@ -1256,19 +1253,19 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x052c21f5,
+ 0x052021f5,
0xf40464b6,
0x21f51b11,
- 0x37f00517,
- 0xca21f500,
+ 0x37f0050b,
+ 0xbe21f500,
0x88e7f104,
0x6721f413,
0xf4013cf0,
-/* 0x0666: i2c_bitr_done */
+/* 0x065a: i2c_bitr_done */
0x00f80131,
-/* 0x0668: i2c_get_byte */
+/* 0x065c: i2c_get_byte */
0xf00057f0,
-/* 0x066e: i2c_get_byte_next */
+/* 0x0662: i2c_get_byte_next */
0x54b60847,
0x0076bb01,
0xf90465b6,
@@ -1276,7 +1273,7 @@ uint32_t nvd0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60621,
+ 0x64b60615,
0x2b11f404,
0xb60553fd,
0x1bf40142,
@@ -1286,12 +1283,12 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xe021f550,
+ 0xd421f550,
0x0464b605,
-/* 0x06b8: i2c_get_byte_done */
-/* 0x06ba: i2c_put_byte */
+/* 0x06ac: i2c_get_byte_done */
+/* 0x06ae: i2c_put_byte */
0x47f000f8,
-/* 0x06bd: i2c_put_byte_next */
+/* 0x06b1: i2c_put_byte_next */
0x0142b608,
0xbb3854ff,
0x65b60076,
@@ -1299,7 +1296,7 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05e021f5,
+ 0x05d421f5,
0xf40464b6,
0x46b03411,
0xd81bf400,
@@ -1308,21 +1305,21 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x2121f550,
+ 0x1521f550,
0x0464b606,
0xbb0f11f4,
0x36b00076,
0x061bf401,
-/* 0x0713: i2c_put_byte_done */
+/* 0x0707: i2c_put_byte_done */
0xf80132f4,
-/* 0x0715: i2c_addr */
+/* 0x0709: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60551,
+ 0x64b60545,
0x2911f404,
0x012ec3e7,
0xfd0134b6,
@@ -1332,23 +1329,23 @@ uint32_t nvd0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb606ba21,
-/* 0x075a: i2c_addr_done */
+ 0xb606ae21,
+/* 0x074e: i2c_addr_done */
0x00f80464,
-/* 0x075c: i2c_acquire_addr */
+/* 0x0750: i2c_acquire_addr */
0xb6f8cec7,
0xe0b705e4,
0x00f8d014,
-/* 0x0768: i2c_acquire */
- 0x075c21f5,
+/* 0x075c: i2c_acquire */
+ 0x075021f5,
0xf00421f4,
0x21f403d9,
-/* 0x0777: i2c_release */
+/* 0x076b: i2c_release */
0xf500f833,
- 0xf4075c21,
+ 0xf4075021,
0xdaf00421,
0x3321f403,
-/* 0x0786: i2c_recv */
+/* 0x077a: i2c_recv */
0x32f400f8,
0xf8c1c701,
0xb00214b6,
@@ -1367,7 +1364,7 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x6821f550,
+ 0x5c21f550,
0x0464b607,
0xd6b0d0fc,
0xb31bf500,
@@ -1377,7 +1374,7 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x1521f550,
+ 0x0921f550,
0x0464b607,
0x00d011f5,
0xbbe0c5c7,
@@ -1386,7 +1383,7 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x06ba21f5,
+ 0x06ae21f5,
0xf50464b6,
0xf000ad11,
0x76bb0157,
@@ -1395,7 +1392,7 @@ uint32_t nvd0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6071521,
+ 0xb6070921,
0x11f50464,
0x76bb008a,
0x0465b600,
@@ -1403,7 +1400,7 @@ uint32_t nvd0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6066821,
+ 0xb6065c21,
0x11f40464,
0xe05bcb6a,
0xb60076bb,
@@ -1411,38 +1408,38 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xad21f550,
+ 0xa121f550,
0x0464b605,
0xbd025bb9,
0x430ef474,
-/* 0x088c: i2c_recv_not_rd08 */
+/* 0x0880: i2c_recv_not_rd08 */
0xf401d6b0,
0x57f03d1b,
- 0x1521f500,
+ 0x0921f500,
0x3311f407,
0xf5e0c5c7,
- 0xf406ba21,
+ 0xf406ae21,
0x57f02911,
- 0x1521f500,
+ 0x0921f500,
0x1f11f407,
0xf5e0b5c7,
- 0xf406ba21,
+ 0xf406ae21,
0x21f51511,
- 0x74bd05ad,
+ 0x74bd05a1,
0xf408c5c7,
0x32f4091b,
0x030ef402,
-/* 0x08cc: i2c_recv_not_wr08 */
-/* 0x08cc: i2c_recv_done */
+/* 0x08c0: i2c_recv_not_wr08 */
+/* 0x08c0: i2c_recv_done */
0xf5f8cec7,
- 0xfc077721,
+ 0xfc076b21,
0xf4d0fce0,
0x7cb90a12,
0x6b21f502,
-/* 0x08e1: i2c_recv_exit */
-/* 0x08e3: i2c_init */
+/* 0x08d5: i2c_recv_exit */
+/* 0x08d7: i2c_init */
0xf800f802,
-/* 0x08e5: test_recv */
+/* 0x08d9: test_recv */
0xd817f100,
0x0011cf05,
0xf10110b6,
@@ -1451,28 +1448,28 @@ uint32_t nvd0_pwr_code[] = {
0xd900e7f1,
0x134fe3f1,
0x01b621f5,
-/* 0x0906: test_init */
+/* 0x08fa: test_init */
0xe7f100f8,
0x21f50800,
0x00f801b6,
-/* 0x0910: idle_recv */
-/* 0x0912: idle */
+/* 0x0904: idle_recv */
+/* 0x0906: idle */
0x31f400f8,
0xd417f100,
0x0011cf05,
0xf10110b6,
0xd005d407,
0x04bd0001,
-/* 0x0928: idle_loop */
+/* 0x091c: idle_loop */
0xf45817f0,
-/* 0x092e: idle_proc */
-/* 0x092e: idle_proc_exec */
+/* 0x0922: idle_proc */
+/* 0x0922: idle_proc_exec */
0x10f90232,
0xf5021eb9,
0xfc027421,
0x0911f410,
0xf40231f4,
-/* 0x0942: idle_proc_next */
+/* 0x0936: idle_proc_next */
0x10b6ef0e,
0x061fb858,
0xf4e61bf4,
@@ -1521,4 +1518,7 @@ uint32_t nvd0_pwr_code[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
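
A note on the three fuc.h hunks above (nva3, nvc0, nvd0): they are regenerated microcode images, not hand edits. Three instruction dwords were dropped from the code path just before memx_func_wait, so every later symbol appears to shift down by 0xc bytes -- for example 0x0000054e - 0x00000542 = 0xc, i.e. 3 x 4-byte words, and likewise i2c_recv moves 0x0825 -> 0x0819 -- the data-table pointers are updated to match, and three trailing 0x00000000 words are appended, apparently so the overall image size stays constant.
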
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c
new file mode 100644
index 000000000000..d76612999b9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+#define nvd0_pwr_code gk104_pwr_code
+#define nvd0_pwr_data gk104_pwr_data
+#include "fuc/nvd0.fuc.h"
+
+static void
+gk104_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
+{
+ nv_mask(ppwr, 0x000200, 0x00001000, 0x00000000);
+ nv_rd32(ppwr, 0x000200);
+ nv_mask(ppwr, 0x000200, 0x08000000, 0x08000000);
+ msleep(50);
+
+ nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000002);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(ppwr, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+ msleep(50);
+
+ nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000000);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(ppwr, 0x000200, 0x08000000, 0x00000000);
+ nv_mask(ppwr, 0x000200, 0x00001000, 0x00001000);
+ nv_rd32(ppwr, 0x000200);
+}
+
+struct nouveau_oclass *
+gk104_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xe4),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
+ .dtor = _nouveau_pwr_dtor,
+ .init = _nouveau_pwr_init,
+ .fini = _nouveau_pwr_fini,
+ },
+ .code.data = gk104_pwr_code,
+ .code.size = sizeof(gk104_pwr_code),
+ .data.data = gk104_pwr_data,
+ .data.size = sizeof(gk104_pwr_data),
+ .pgob = gk104_pwr_pgob,
+}.base;
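
gk104_pwr_pgob() above is written almost entirely in terms of nv_mask() read-modify-write cycles. As a reading aid, here is a minimal sketch of that idiom, assuming plain rd32()/wr32() register accessors; the names and the return-old-value convention are illustrative assumptions, not a quote of the in-tree helper.

/* Sketch: read-modify-write a register field in the style of nv_mask().
 * rd32()/wr32() are assumed accessors for this illustration only. */
static inline u32
mask32(void *obj, u32 addr, u32 mask, u32 data)
{
	u32 old = rd32(obj, addr);             /* fetch current value */
	wr32(obj, addr, (old & ~mask) | data); /* clear field, set new bits */
	return old;                            /* previous value, often ignored */
}

Read this way, nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001) followed by nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000) pulses bit 0 of 0x10a78c without disturbing the rest of the register.
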
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
index 03de3107d29f..def6a9ac68cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -1,8 +1,7 @@
#ifndef __NVKM_PWR_MEMX_H__
#define __NVKM_PWR_MEMX_H__
-#include <subdev/pwr.h>
-#include <subdev/pwr/fuc/os.h>
+#include "priv.h"
struct nouveau_memx {
struct nouveau_pwr *ppwr;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
index 52c85414866a..04ff7c3c34e9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -22,41 +22,20 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nv108.fuc.h"
-struct nv108_pwr_priv {
- struct nouveau_pwr base;
-};
-
-static int
-nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv108_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nv108_pwr_code;
- priv->base.code.size = sizeof(nv108_pwr_code);
- priv->base.data.data = nv108_pwr_data;
- priv->base.data.size = sizeof(nv108_pwr_data);
- return 0;
-}
-
-struct nouveau_oclass
-nv108_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv108_pwr_ctor,
+struct nouveau_oclass *
+nv108_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0x00),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = _nouveau_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nv108_pwr_code,
+ .code.size = sizeof(nv108_pwr_code),
+ .data.data = nv108_pwr_data,
+ .data.size = sizeof(nv108_pwr_data),
+}.base;
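
The "&(struct nvkm_pwr_impl) { ... }.base" construction introduced here (and repeated for nva3, nvc0, and nvd0 below) takes the address of the base member of a file-scope C99 compound literal; such a literal has static storage duration, so the expression is a valid constant initializer. A standalone toy version, with made-up names, to show the shape:

#include <stdio.h>

struct base { int handle; };

struct impl {
	struct base base;	/* first member, so a cast can recover impl */
	int extra;
};

/* File-scope compound literal: static storage duration, so taking the
 * address of its member is a constant expression (C99). */
struct base *toy_oclass = &(struct impl) {
	.base.handle = 0x00,
	.extra = 42,
}.base;

int main(void)
{
	struct impl *impl = (struct impl *)toy_oclass;	/* downcast */
	printf("extra = %d\n", impl->extra);
	return 0;
}

The payoff in this series is that four near-identical per-chipset constructors collapse into the shared _nouveau_pwr_ctor(), with the chip-specific firmware blobs carried in the impl.
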
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
index c132b7ca9747..998d53076b8b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -22,50 +22,29 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nva3.fuc.h"
-struct nva3_pwr_priv {
- struct nouveau_pwr base;
-};
-
static int
nva3_pwr_init(struct nouveau_object *object)
{
- struct nva3_pwr_priv *priv = (void *)object;
- nv_mask(priv, 0x022210, 0x00000001, 0x00000000);
- nv_mask(priv, 0x022210, 0x00000001, 0x00000001);
- return nouveau_pwr_init(&priv->base);
-}
-
-static int
-nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nva3_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nva3_pwr_code;
- priv->base.code.size = sizeof(nva3_pwr_code);
- priv->base.data.data = nva3_pwr_data;
- priv->base.data.size = sizeof(nva3_pwr_data);
- return 0;
+ struct nouveau_pwr *ppwr = (void *)object;
+ nv_mask(ppwr, 0x022210, 0x00000001, 0x00000000);
+ nv_mask(ppwr, 0x022210, 0x00000001, 0x00000001);
+ return nouveau_pwr_init(ppwr);
}
-struct nouveau_oclass
-nva3_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_pwr_ctor,
+struct nouveau_oclass *
+nva3_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xa3),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = nva3_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nva3_pwr_code,
+ .code.size = sizeof(nva3_pwr_code),
+ .data.data = nva3_pwr_data,
+ .data.size = sizeof(nva3_pwr_data),
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
index 495f6857428d..9a773e66efa4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -22,41 +22,20 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nvc0.fuc.h"
-struct nvc0_pwr_priv {
- struct nouveau_pwr base;
-};
-
-static int
-nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nvc0_pwr_code;
- priv->base.code.size = sizeof(nvc0_pwr_code);
- priv->base.data.data = nvc0_pwr_data;
- priv->base.data.size = sizeof(nvc0_pwr_data);
- return 0;
-}
-
-struct nouveau_oclass
-nvc0_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_pwr_ctor,
+struct nouveau_oclass *
+nvc0_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = _nouveau_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nvc0_pwr_code,
+ .code.size = sizeof(nvc0_pwr_code),
+ .data.data = nvc0_pwr_data,
+ .data.size = sizeof(nvc0_pwr_data),
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
index 043aa142fe82..2b29be5d08ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -22,41 +22,20 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nvd0.fuc.h"
-struct nvd0_pwr_priv {
- struct nouveau_pwr base;
-};
-
-static int
-nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvd0_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nvd0_pwr_code;
- priv->base.code.size = sizeof(nvd0_pwr_code);
- priv->base.data.data = nvd0_pwr_data;
- priv->base.data.size = sizeof(nvd0_pwr_data);
- return 0;
-}
-
-struct nouveau_oclass
-nvd0_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0xd0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_pwr_ctor,
+struct nouveau_oclass *
+nvd0_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xd0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = _nouveau_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nvd0_pwr_code,
+ .code.size = sizeof(nvd0_pwr_code),
+ .data.data = nvd0_pwr_data,
+ .data.size = sizeof(nvd0_pwr_data),
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
new file mode 100644
index 000000000000..3814a341db32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
@@ -0,0 +1,44 @@
+#ifndef __NVKM_PWR_PRIV_H__
+#define __NVKM_PWR_PRIV_H__
+
+#include <subdev/pwr.h>
+#include <subdev/pwr/fuc/os.h>
+
+#define nouveau_pwr_create(p, e, o, d) \
+ nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_pwr_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_pwr_init(p) ({ \
+ struct nouveau_pwr *_ppwr = (p); \
+ _nouveau_pwr_init(nv_object(_ppwr)); \
+})
+#define nouveau_pwr_fini(p,s) ({ \
+ struct nouveau_pwr *_ppwr = (p); \
+ _nouveau_pwr_fini(nv_object(_ppwr), (s)); \
+})
+
+int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+
+int _nouveau_pwr_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+#define _nouveau_pwr_dtor _nouveau_subdev_dtor
+int _nouveau_pwr_init(struct nouveau_object *);
+int _nouveau_pwr_fini(struct nouveau_object *, bool);
+
+struct nvkm_pwr_impl {
+ struct nouveau_oclass base;
+ struct {
+ u32 *data;
+ u32 size;
+ } code;
+ struct {
+ u32 *data;
+ u32 size;
+ } data;
+
+ void (*pgob)(struct nouveau_pwr *, bool);
+};
+
+#endif
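
priv.h declares _nouveau_pwr_ctor(), but its body is not part of this diff. A plausible sketch, assuming it simply centralizes what each removed per-chipset constructor (nv108_pwr_ctor() and friends above) used to do, by recovering the nvkm_pwr_impl from the oclass embedded as its base member:

int
_nouveau_pwr_ctor(struct nouveau_object *parent,
		  struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	/* The oclass is embedded as nvkm_pwr_impl.base, so casting it
	 * back recovers the per-chipset firmware blobs. */
	struct nvkm_pwr_impl *impl = (void *)oclass;
	struct nouveau_pwr *ppwr;
	int ret;

	ret = nouveau_pwr_create(parent, engine, oclass, &ppwr);
	*pobject = nv_object(ppwr);
	if (ret)
		return ret;

	ppwr->code.data = impl->code.data;
	ppwr->code.size = impl->code.size;
	ppwr->data.data = impl->data.data;
	ppwr->data.size = impl->data.size;
	return 0;
}
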
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 668cf964e4a9..2d0988755530 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -28,7 +28,7 @@
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/bar.h>
struct nvc0_vmmgr_priv {
@@ -116,12 +116,12 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
pte <<= 3;
if (mem->tag) {
- struct nouveau_ltcg *ltcg =
- nouveau_ltcg(vma->vm->vmm->base.base.parent);
+ struct nouveau_ltc *ltc =
+ nouveau_ltc(vma->vm->vmm->base.base.parent);
u32 tag = mem->tag->offset + (delta >> 17);
phys |= (u64)tag << (32 + 12);
next |= (u64)1 << (32 + 12);
- ltcg->tags_clear(ltcg, tag, cnt);
+ ltc->tags_clear(ltc, tag, cnt);
}
while (cnt--) {
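
For readers decoding the hunk above: the ltcg -> ltc rename leaves the comptag arithmetic untouched. One tag covers a 2^17-byte (128 KiB) span of the mapping, and the tag index lands in the PTE above bit 44 (32 + 12). A stripped-down restatement of that math, as an illustration rather than the driver function:

/* Mirror of the tag packing in nvc0_vm_map() above: 'delta' is the byte
 * offset into the mapping, one compression tag per 128 KiB (1 << 17). */
static u64 pack_comptag(u64 phys, u32 tag_base, u64 delta)
{
	u32 tag = tag_base + (u32)(delta >> 17);
	return phys | ((u64)tag << (32 + 12));	/* tag field starts at bit 44 */
}
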
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2a15b98b4d2b..c6361422a0b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,12 +198,12 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nv_fifo_info fifo_data;
struct nv_sim_state sim_data;
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
- uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1);
+ uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
@@ -221,13 +221,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
sim_data.mem_latency = 3;
sim_data.mem_page_miss = 10;
} else {
- sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1;
- sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
+ sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1;
+ sim_data.memory_width = (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
sim_data.mem_latency = cfg1 & 0xf;
sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
}
- if (nv_device(drm->device)->card_type == NV_04)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
nv04_calc_arb(&fifo_data, &sim_data);
else
nv10_calc_arb(&fifo_data, &sim_data);
@@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->card_type < NV_20)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
(dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 41be3424c906..b90aa5c1f90a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -111,8 +111,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bios *bios = nouveau_bios(drm->device);
- struct nouveau_clock *clk = nouveau_clock(drm->device);
+ struct nouveau_bios *bios = nvkm_bios(&drm->device);
+ struct nouveau_clock *clk = nvkm_clock(&drm->device);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -136,7 +136,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
* has yet been observed in allowing the use a single stage pll on all
* nv43 however. the behaviour of single stage use is untested on nv40
*/
- if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
+ if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
@@ -146,10 +146,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
/* The blob uses this always, so let's do the same */
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
/* again nv40 and some nv43 act more like nv3x as described above */
- if (nv_device(drm->device)->chipset < 0x41)
+ if (drm->device.info.chipset < 0x41)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
@@ -275,7 +275,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
horizEnd = horizTotal - 2;
horizBlankEnd = horizTotal + 4;
#if 0
- if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10)
+ if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* This reportedly works around some video overlay bandwidth problems */
horizTotal += 2;
#endif
@@ -509,7 +509,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
- if (nv_device(drm->device)->chipset >= 0x11)
+ if (drm->device.info.chipset >= 0x11)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -550,26 +550,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
* 1 << 30 on 0x60.830), for no apparent reason */
regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
regp->crtc_830 = mode->crtc_vdisplay - 3;
regp->crtc_834 = mode->crtc_vdisplay - 1;
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
/* This is what the blob does */
regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
else
regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->CRTC[NV_CIO_CRE_85] = 0xFF;
regp->CRTC[NV_CIO_CRE_86] = 0x1;
}
@@ -581,7 +581,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
/* Generic PRAMDAC regs */
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* Only bit that bios and blob set. */
regp->nv10_cursync = (1 << 25);
@@ -590,7 +590,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
if (crtc->primary->fb->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
- if (nv_device(drm->device)->chipset >= 0x11)
+ if (drm->device.info.chipset >= 0x11)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -653,7 +653,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc_mode_set_vga(crtc, adjusted_mode);
/* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
nv_crtc_mode_set_regs(crtc, adjusted_mode);
nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
@@ -714,7 +714,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
/* Some more preparation. */
NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
}
@@ -888,7 +888,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (nv_device(drm->device)->card_type >= NV_20) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
}
@@ -915,9 +915,9 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_device *dev = drm->dev;
if (state == ENTER_ATOMIC_MODE_SET)
- nouveau_fbcon_save_disable_accel(dev);
+ nouveau_fbcon_accel_save_disable(dev);
else
- nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_accel_restore(dev);
return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
}
@@ -969,7 +969,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->chipset == 0x11) {
+ if (drm->device.info.chipset == 0x11) {
pixel = ((pixel & 0x000000ff) << 24) |
((pixel & 0x0000ff00) << 8) |
((pixel & 0x00ff0000) >> 8) |
@@ -1010,7 +1010,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (ret)
goto out;
- if (nv_device(drm->device)->chipset >= 0x11)
+ if (drm->device.info.chipset >= 0x11)
nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
else
nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
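
The dispnv04 hunks in this range all perform the same mechanical conversion: chipset checks that used to dereference nv_device(drm->device) now read drm->device.info directly. The card_type-to-family pairings, exactly as they appear in the hunks above and below:

/* card_type -> family names, as paired in these conversions:
 *	NV_04 -> NV_DEVICE_INFO_V0_TNT
 *	NV_10 -> NV_DEVICE_INFO_V0_CELSIUS
 *	NV_20 -> NV_DEVICE_INFO_V0_KELVIN
 *	NV_30 -> NV_DEVICE_INFO_V0_RANKINE
 *	NV_40 -> NV_DEVICE_INFO_V0_CURIE
 */
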
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index a810303169de..4e61173c3353 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index a96dda48718e..2d8056cde996 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_timer *ptimer = nouveau_timer(device);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nouveau_timer *ptimer = nvkm_timer(device);
int i;
for (i = 0; i < 2; i++) {
@@ -95,15 +95,15 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
udelay(100);
/* when level triggers, sense is _LO_ */
- sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+ sense_a = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
/* take another reading until it agrees with sense_a... */
do {
udelay(100);
- sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+ sense_b = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
if (sense_a != sense_b) {
sense_b_prime =
- nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+ nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
if (sense_b == sense_b_prime) {
/* ... unless two consecutive subsequent
* samples agree; sense_a is replaced */
@@ -128,7 +128,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
@@ -164,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
- nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
+ nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
for (i = 0; i < 3; i++)
- saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA);
- saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK);
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
+ saved_palette0[i] = nvif_rd08(device, NV_PRMDIO_PALETTE_DATA);
+ saved_palette_mask = nvif_rd08(device, NV_PRMDIO_PIXEL_MASK);
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
@@ -181,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
do {
bool sense_pair[2];
- nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
+ nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
/* testing blue won't find monochrome monitors. I don't care */
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
i = 0;
/* take sample pairs until both samples in the pair agree */
@@ -208,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
} while (++blue < 0x18 && sense);
out:
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
- nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
for (i = 0; i < 3; i++)
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
@@ -231,8 +231,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(device);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nouveau_gpio *gpio = nvkm_gpio(device);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -256,12 +256,12 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
- saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2);
- nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
+ saved_powerctrl_2 = nvif_rd32(device, NV_PBUS_POWERCTRL_2);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
if (regoffset == 0x68) {
- saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4);
- nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+ saved_powerctrl_4 = nvif_rd32(device, NV_PBUS_POWERCTRL_4);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
if (gpio) {
@@ -283,7 +283,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
routput = (saved_routput & 0xfffffece) | head << 8;
- if (nv_device(drm->device)->card_type >= NV_40) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
if (dcb->type == DCB_OUTPUT_TV)
routput |= 0x1a << 16;
else
@@ -316,8 +316,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
if (regoffset == 0x68)
- nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
- nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
if (gpio) {
gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
@@ -398,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
}
/* This could use refinement for flatpanels, but it should work this way */
- if (nv_device(drm->device)->chipset < 0x44)
+ if (drm->device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
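
The dac.c load-detect paths swap the nv_rd08/nv_wr08/nv_rd32/nv_wr32 accessors, which took a struct nouveau_device, for nvif_rd08/nvif_wr08/nvif_rd32/nvif_wr32 on a struct nvif_device; the save/modify/restore discipline around them is unchanged. A sketch of that idiom using only a register and mask from the nv17_dac_sample_load() hunks above (the helper name is illustrative):

    /* Save a power-control register, mask bits for the measurement,
     * then restore the saved value, as nv17_dac_sample_load() does. */
    static void
    example_powerctrl_save_restore(struct nvif_device *device)
    {
            u32 saved = nvif_rd32(device, NV_PBUS_POWERCTRL_2);

            nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved & 0xd7ffffff);
            /* ... take the DAC sense readings here ... */
            nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved);
    }
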
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index e57babb206d3..42a5435259f7 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -335,7 +335,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
else /* gpu needs to scale */
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
- if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
+ if (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
output_mode->clock > 165000)
@@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
(nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) {
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
int i;
@@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
}
}
} else {
- if (nv_device(drm->device)->chipset != 0x11) {
+ if (drm->device.info.chipset != 0x11) {
/* reset them */
int i;
for (i = 0; i < 3; i++) {
@@ -463,7 +463,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
/* This could use refinement for flatpanels, but it should work this way */
- if (nv_device(drm->device)->chipset < 0x44)
+ if (drm->device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
{
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
@@ -623,7 +623,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, 2);
struct nouveau_i2c_board_info info[] = {
{
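
dfp.c derives its device pointer the same way as the rest of the series: &nouveau_drm(dev)->device replaces nouveau_dev(dev) wherever a register accessor needs a struct nvif_device. A one-line sketch of the lookup, assuming only the nouveau_drm() accessor already used in the diff:

    /* Illustrative: the lookup every converted function now opens with. */
    static struct nvif_device *
    example_nvif_device(struct drm_device *dev)
    {
            /* was: struct nouveau_device *device = nouveau_dev(dev); */
            return &nouveau_drm(dev)->device;
    }
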
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4342fdaee707..3d0afa1c6cff 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -22,9 +22,6 @@
* Author: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/class.h>
-
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -34,8 +31,6 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
-#include <subdev/i2c.h>
-
int
nv04_display_early_init(struct drm_device *dev)
{
@@ -58,7 +53,7 @@ int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -70,6 +65,8 @@ nv04_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
+ nvif_object_map(nvif_object(&drm->device));
+
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
nouveau_display(dev)->init = nv04_display_init;
@@ -144,6 +141,7 @@ void
nv04_display_destroy(struct drm_device *dev)
{
struct nv04_display *disp = nv04_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_encoder *encoder;
struct drm_crtc *crtc;
@@ -170,6 +168,8 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_display(dev)->priv = NULL;
kfree(disp);
+
+ nvif_object_unmap(nvif_object(&drm->device));
}
int
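
disp.c now maps the nvif device object at display-create time and unmaps it on destroy, which is what lets the nvif_rd*/nvif_wr* accessors elsewhere in dispnv04 touch registers directly. A sketch of the pairing, assuming only the two calls shown in the hunks (error handling elided, helper name illustrative):

    /* Map on create, unmap on destroy, mirroring nv04_display_create()
     * and nv04_display_destroy() above. */
    static void
    example_map_lifetime(struct nouveau_drm *drm)
    {
            nvif_object_map(nvif_object(&drm->device));
            /* ... display code may use the nvif_rd/wr accessors ... */
            nvif_object_unmap(nvif_object(&drm->device));
    }
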
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 4245fc3dab70..17b899d9aba3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -131,7 +131,7 @@ nv_two_heads(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
const int impl = dev->pdev->device & 0x0ff0;
- if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
return true;
@@ -150,7 +150,7 @@ nv_two_reg_pll(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
const int impl = dev->pdev->device & 0x0ff0;
- if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
+ if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE)
return true;
return false;
}
@@ -171,8 +171,8 @@ static inline void
nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct dcb_output *outp, int crtc)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_bios *bios = nvkm_bios(&drm->device);
struct nvbios_init init = {
.subdev = nv_subdev(bios),
.bios = bios,
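
disp.h shows the companion change for subdev lookups: helpers that took a struct nouveau_device (nouveau_bios(), nouveau_i2c(), ...) become nvkm_*() calls on &drm->device. A sketch, assuming only the nvkm_bios()/nvkm_i2c() accessors that appear in these hunks:

    /* Illustrative subdev lookups in the new style. */
    static void
    example_subdev_lookup(struct drm_device *dev)
    {
            struct nouveau_drm *drm = nouveau_drm(dev);
            struct nouveau_bios *bios = nvkm_bios(&drm->device);
            struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);

            (void)bios;
            (void)i2c;
    }
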
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index aca76af115b3..3d4c19300768 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,9 +27,6 @@
#include "hw.h"
#include <subdev/bios/pll.h>
-#include <subdev/fb.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
#define CHIPSET_NFORCE 0x01a0
#define CHIPSET_NFORCE2 0x01f0
@@ -92,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner)
if (owner == 1)
owner *= 3;
- if (nv_device(drm->device)->chipset == 0x11) {
+ if (drm->device.info.chipset == 0x11) {
/* This might seem stupid, but the blob does it and
* omitting it often locks the system up.
*/
@@ -103,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner)
/* CR44 is always changed on CRTC0 */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
- if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */
+ if (drm->device.info.chipset == 0x11) { /* set me harder */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
}
@@ -152,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
pllvals->NM1 = pll1 & 0xffff;
if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
pllvals->NM2 = pll2 & 0xffff;
- else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) {
+ else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) {
pllvals->M1 &= 0xf; /* only 4 bits */
if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -168,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
struct nouveau_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_bios *bios = nouveau_bios(device);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_bios *bios = nvkm_bios(device);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -178,16 +175,16 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
if (ret || !(reg1 = pll_lim.reg))
return -ENOENT;
- pll1 = nv_rd32(device, reg1);
+ pll1 = nvif_rd32(device, reg1);
if (reg1 <= 0x405c)
- pll2 = nv_rd32(device, reg1 + 4);
+ pll2 = nvif_rd32(device, reg1 + 4);
else if (nv_two_reg_pll(dev)) {
uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
- pll2 = nv_rd32(device, reg2);
+ pll2 = nvif_rd32(device, reg2);
}
- if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
/* check whether vpll has been forced into single stage mode */
@@ -255,9 +252,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_clock *clk = nouveau_clock(device);
- struct nouveau_bios *bios = nouveau_bios(device);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_clock *clk = nvkm_clock(device);
+ struct nouveau_bios *bios = nvkm_bios(device);
struct nvbios_pll pll_lim;
struct nouveau_pll_vals pv;
enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
@@ -394,21 +391,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
int i;
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
if (nv_two_heads(dev))
state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
if (nv_gf4_disp_arch(dev))
regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
- if (nv_device(drm->device)->chipset >= 0x30)
+ if (drm->device.info.chipset >= 0x30)
regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -450,7 +447,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
if (nv_gf4_disp_arch(dev))
regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -466,26 +463,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_clock *clk = nouveau_clock(drm->device);
+ struct nouveau_clock *clk = nvkm_clock(&drm->device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
clk->pll_prog(clk, pllreg, &regp->pllvals);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
if (nv_two_heads(dev))
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
- if (nv_device(drm->device)->chipset >= 0x30)
+ if (drm->device.info.chipset >= 0x30)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -522,7 +519,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -603,10 +600,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
- if (nv_device(drm->device)->card_type >= NV_20)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
rd_cio_state(dev, head, regp, 0x9f);
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -615,14 +612,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
if (nv_two_heads(dev))
@@ -634,7 +631,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -663,14 +660,13 @@ nv_load_state_ext(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_timer *ptimer = nouveau_timer(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_timer *ptimer = nvkm_timer(device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t reg900;
int i;
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
if (nv_two_heads(dev))
/* setting ENGINE_CTRL (EC) *must* come before
* CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -678,24 +674,24 @@ nv_load_state_ext(struct drm_device *dev, int head,
*/
NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
- nv_wr32(device, NV_PVIDEO_STOP, 1);
- nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
- nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
- nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
- nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1);
- nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1);
- nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1);
- nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1);
- nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
+ nvif_wr32(device, NV_PVIDEO_STOP, 1);
+ nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
+ nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
+ nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -718,23 +714,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (nv_device(drm->device)->card_type >= NV_20)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
wr_cio_state(dev, head, regp, 0x9f);
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, head);
wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -742,7 +738,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
}
/* NV11 and NV20 stop at 0x52. */
if (nv_gf4_disp_arch(dev)) {
- if (nv_device(drm->device)->card_type < NV_20) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
@@ -769,15 +765,15 @@ static void
nv_save_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
int head_offset = head * NV_PRMDIO_SIZE, i;
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
NV_PRMDIO_PIXEL_MASK_MASK);
- nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
+ nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
for (i = 0; i < 768; i++) {
- state->crtc_reg[head].DAC[i] = nv_rd08(device,
+ state->crtc_reg[head].DAC[i] = nvif_rd08(device,
NV_PRMDIO_PALETTE_DATA + head_offset);
}
@@ -788,15 +784,15 @@ void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
int head_offset = head * NV_PRMDIO_SIZE, i;
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
NV_PRMDIO_PIXEL_MASK_MASK);
- nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
+ nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
for (i = 0; i < 768; i++) {
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
state->crtc_reg[head].DAC[i]);
}
@@ -808,7 +804,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head,
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
/* NB: no attempt is made to restore the bad pll later on */
nouveau_hw_fix_bad_vpll(dev, head);
nv_save_state_ramdac(dev, head, state);
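
One behavioural detail in hw.c worth noting: nv_load_state_ext() no longer needs the fb subdev at all, because the overlay limit registers are clamped to device->info.ram_size instead of pfb->ram->size. A sketch of that hunk's pattern, assuming the NV_PVIDEO_LIMIT() macro from the diff (helper name illustrative):

    /* was: nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1); */
    static void
    example_clamp_video_limit(struct nvif_device *device)
    {
            nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
    }
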
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index eeb70d912d99..7f53c571f31f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,41 +60,41 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
static inline uint32_t NVReadCRTC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint32_t val;
if (head)
reg += NV_PCRTC0_SIZE;
- val = nv_rd32(device, reg);
+ val = nvif_rd32(device, reg);
return val;
}
static inline void NVWriteCRTC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
if (head)
reg += NV_PCRTC0_SIZE;
- nv_wr32(device, reg, val);
+ nvif_wr32(device, reg, val);
}
static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint32_t val;
if (head)
reg += NV_PRAMDAC0_SIZE;
- val = nv_rd32(device, reg);
+ val = nvif_rd32(device, reg);
return val;
}
static inline void NVWriteRAMDAC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
if (head)
reg += NV_PRAMDAC0_SIZE;
- nv_wr32(device, reg, val);
+ nvif_wr32(device, reg, val);
}
static inline uint8_t nv_read_tmds(struct drm_device *dev,
@@ -120,18 +120,18 @@ static inline void nv_write_tmds(struct drm_device *dev,
static inline void NVWriteVgaCrtc(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
- nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
}
static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
int head, uint8_t index)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint8_t val;
- nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
- val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
return val;
}
@@ -165,74 +165,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t val;
/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
* NVSetOwner for the relevant head to be programmed */
- if (head && nv_device(drm->device)->card_type == NV_40)
+ if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
reg += NV_PRMVIO_SIZE;
- val = nv_rd08(device, reg);
+ val = nvif_rd08(device, reg);
return val;
}
static inline void NVWritePRMVIO(struct drm_device *dev,
int head, uint32_t reg, uint8_t value)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
* NVSetOwner for the relevant head to be programmed */
- if (head && nv_device(drm->device)->card_type == NV_40)
+ if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
reg += NV_PRMVIO_SIZE;
- nv_wr08(device, reg, value);
+ nvif_wr08(device, reg, value);
}
static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
}
static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
}
static inline void NVWriteVgaAttr(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
else
index |= 0x20;
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
- nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
}
static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
int head, uint8_t index)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint8_t val;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
else
index |= 0x20;
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
- val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
return val;
}
@@ -259,11 +259,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
static inline bool
nv_heads_tied(struct drm_device *dev)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->chipset == 0x11)
- return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
+ if (drm->device.info.chipset == 0x11)
+ return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
}
@@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
/* NV11 has independently lockable extended crtcs, except when tied */
- if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev))
+ if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev))
NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
lock ? NV_CIO_SR_LOCK_VALUE :
NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+ return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
}
static inline void
@@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
- if (nv_device(drm->device)->card_type == NV_04) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) {
/*
* Hilarious, the 24th bit doesn't want to stick to
* PCRTC_START...
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, head);
}
@@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
bpp = 8;
/* Alignment requirements taken from the Haiku driver */
- if (nv_device(drm->device)->card_type == NV_04)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
mask = 128 / bpp - 1;
else
mask = 512 / bpp - 1;
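
The hw.h inlines keep their index/data protocol for the VGA CRTC and attribute controllers; only the accessor spelling changes. A sketch of the indexed write, matching NVWriteVgaCrtc() above with the per-head offset elided (helper name illustrative):

    /* Indexed CRTC write: select the register via CRX, then write CR. */
    static void
    example_vga_crtc_write(struct nvif_device *device, u8 index, u8 value)
    {
            nvif_wr08(device, NV_PRMCIO_CRX__COLOR, index);
            nvif_wr08(device, NV_PRMCIO_CR__COLOR, value);
    }
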
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index ab03f7719d2d..b36afcbbc83f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -117,7 +117,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (format > 0xffff)
return -ERANGE;
- if (dev->chipset >= 0x30) {
+ if (dev->info.chipset >= 0x30) {
if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
return -ERANGE;
} else {
@@ -131,17 +131,17 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo;
- nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
- nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
+ nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
+ nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
- nv_wr32(dev, NV_PVIDEO_BASE(flip), 0);
- nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
- nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
- nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
- nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
- nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
- nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
- nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
+ nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
+ nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+ nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
+ nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
+ nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
+ nvif_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
+ nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
+ nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
if (fb->pixel_format != DRM_FORMAT_UYVY)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
@@ -153,14 +153,14 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
if (fb->pixel_format == DRM_FORMAT_NV12) {
- nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
- nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
+ nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
+ nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
nv_fb->nvbo->bo.offset + fb->offsets[1]);
}
- nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
- nv_wr32(dev, NV_PVIDEO_STOP, 0);
+ nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+ nvif_wr32(dev, NV_PVIDEO_STOP, 0);
/* TODO: wait for vblank? */
- nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
+ nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
nv_plane->flip = !flip;
if (cur)
@@ -172,10 +172,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv10_disable_plane(struct drm_plane *plane)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
- nv_wr32(dev, NV_PVIDEO_STOP, 1);
+ nvif_wr32(dev, NV_PVIDEO_STOP, 1);
if (nv_plane->cur) {
nouveau_bo_unpin(nv_plane->cur);
nv_plane->cur = NULL;
@@ -195,24 +195,24 @@ nv_destroy_plane(struct drm_plane *plane)
static void
nv10_set_params(struct nouveau_plane *plane)
{
- struct nouveau_device *dev = nouveau_dev(plane->base.dev);
+ struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device;
u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
(cos_mul(plane->hue, plane->saturation) & 0xffff);
u32 format = 0;
- nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
- nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
- nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
- nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
- nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
+ nvif_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
+ nvif_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
+ nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
+ nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
+ nvif_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
if (plane->cur) {
if (plane->iturbt_709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
- nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
+ nvif_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
format);
@@ -256,7 +256,7 @@ static const struct drm_plane_funcs nv10_plane_funcs = {
static void
nv10_overlay_init(struct drm_device *device)
{
- struct nouveau_device *dev = nouveau_dev(device);
+ struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
int num_formats = ARRAY_SIZE(formats);
int ret;
@@ -264,7 +264,7 @@ nv10_overlay_init(struct drm_device *device)
if (!plane)
return;
- switch (dev->chipset) {
+ switch (drm->device.info.chipset) {
case 0x10:
case 0x11:
case 0x15:
@@ -333,7 +333,7 @@ cleanup:
drm_plane_cleanup(&plane->base);
err:
kfree(plane);
- nv_error(dev, "Failed to create plane\n");
+ NV_ERROR(drm, "Failed to create plane\n");
}
static int
@@ -343,7 +343,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_bo *cur = nv_plane->cur;
@@ -375,43 +375,43 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo;
- nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
for (i = 0; i < 2; i++) {
- nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
nv_fb->nvbo->bo.offset);
- nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
- nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
}
- nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
- nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
- nv_wr32(dev, NV_PVIDEO_STEP_SIZE,
+ nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
+ nvif_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
+ nvif_wr32(dev, NV_PVIDEO_STEP_SIZE,
(uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
/* It should be possible to convert hue/contrast to this */
- nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
- nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
- nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
- nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
+ nvif_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
+ nvif_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
+ nvif_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
+ nvif_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
- nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
- nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
+ nvif_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
+ nvif_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
- nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
- nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
+ nvif_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
+ nvif_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
- nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
+ nvif_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
if (nv_plane->colorkey & (1 << 24))
overlay |= 0x10;
if (fb->pixel_format == DRM_FORMAT_YUYV)
overlay |= 0x100;
- nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
+ nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
- nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
+ nvif_wr32(dev, NV_PVIDEO_SU_STATE, nvif_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
if (cur)
nouveau_bo_unpin(cur);
@@ -422,13 +422,13 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv04_disable_plane(struct drm_plane *plane)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
- nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
- nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+ nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
+ nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
if (nv_plane->cur) {
nouveau_bo_unpin(nv_plane->cur);
nv_plane->cur = NULL;
@@ -447,7 +447,7 @@ static const struct drm_plane_funcs nv04_plane_funcs = {
static void
nv04_overlay_init(struct drm_device *device)
{
- struct nouveau_device *dev = nouveau_dev(device);
+ struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
int ret;
@@ -483,15 +483,15 @@ cleanup:
drm_plane_cleanup(&plane->base);
err:
kfree(plane);
- nv_error(dev, "Failed to create plane\n");
+ NV_ERROR(drm, "Failed to create plane\n");
}
void
nouveau_overlay_init(struct drm_device *device)
{
- struct nouveau_device *dev = nouveau_dev(device);
- if (dev->chipset < 0x10)
+ struct nvif_device *dev = &nouveau_drm(device)->device;
+ if (dev->info.chipset < 0x10)
nv04_overlay_init(device);
- else if (dev->chipset <= 0x40)
+ else if (dev->info.chipset <= 0x40)
nv10_overlay_init(device);
}
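
overlay.c also picks up nvif_mask(), the read-modify-write form of the accessor, for toggling single control bits such as NV_CRTC_FSEL_OVERLAY. A sketch of the semantics these substitutions imply, open-coded with the accessors from the diff (the equivalence and the return of the old value are assumptions):

    /* Assumed-equivalent open-coded form of nvif_mask(dev, reg, mask, value). */
    static u32
    example_mask(struct nvif_device *dev, u32 reg, u32 mask, u32 value)
    {
            u32 tmp = nvif_rd32(dev, reg);

            nvif_wr32(dev, reg, (tmp & ~mask) | value);
            return tmp;
    }
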
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 8667620b703a..8061d8d0ce79 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,8 +35,6 @@
#include <drm/i2c/ch7006.h>
-#include <subdev/i2c.h>
-
static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
{
{
@@ -56,7 +54,7 @@ static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
return i2c->identify(i2c, i2c_index, "TV encoder",
nv04_tv_encoder_info, NULL, NULL);
@@ -206,7 +204,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
int type, ret;
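
tvnv04.c follows the same subdev pattern for I2C: look up the subdev with nvkm_i2c(&drm->device), then call its existing identify()/find() methods unchanged. A brief sketch, assuming only the method pointers visible in the hunks (helper name illustrative):

    /* Illustrative: port lookup as nv04_tv_create() does above. */
    static struct nouveau_i2c_port *
    example_find_port(struct drm_device *dev, int index)
    {
            struct nouveau_drm *drm = nouveau_drm(dev);
            struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);

            return i2c->find(i2c, index);
    }
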
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 195bd8e86c6a..72d2ab04db47 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -34,11 +34,6 @@
#include "hw.h"
#include "tvnv17.h"
-#include <core/device.h>
-
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
-
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -51,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -135,17 +130,17 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_object *device = drm->device;
+ struct nvif_device *device = &drm->device;
/* Zotac FX5200 */
- if (nv_device_match(device, 0x0322, 0x19da, 0x1035) ||
- nv_device_match(device, 0x0322, 0x19da, 0x2035)) {
+ if (nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x1035) ||
+ nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x2035)) {
*pin_mask = 0xc;
return false;
}
/* MSI nForce2 IGP */
- if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) {
+ if (nv_device_match(nvkm_object(device), 0x01f0, 0x1462, 0x5710)) {
*pin_mask = 0xc;
return false;
}
@@ -167,8 +162,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
return connector_status_disconnected;
if (reliable) {
- if (nv_device(drm->device)->chipset == 0x42 ||
- nv_device(drm->device)->chipset == 0x43)
+ if (drm->device.info.chipset == 0x42 ||
+ drm->device.info.chipset == 0x43)
tv_enc->pin_mask =
nv42_tv_sample_load(encoder) >> 28 & 0xe;
else
@@ -375,7 +370,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -448,7 +443,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
/* Set the DACCLK register */
dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
dacclk |= 0x1a << 16;
if (tv_norm->kind == CTV_ENC_MODE) {
@@ -505,7 +500,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
tv_regs->ptv_614 = 0x13;
}
- if (nv_device(drm->device)->card_type >= NV_30) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
tv_regs->ptv_500 = 0xe8e0;
tv_regs->ptv_504 = 0x1710;
tv_regs->ptv_604 = 0x0;
@@ -600,7 +595,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
/* This could use refinement for flatpanels, but it should work */
- if (nv_device(drm->device)->chipset < 0x44)
+ if (drm->device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
nv04_dac_output_offset(encoder),
0xf0000000);
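
Where tvnv17.c still needs the legacy nv_device_match() quirk test, the nvif device is translated back to a core object with nvkm_object() first. A sketch of the quirk check, reusing the Zotac FX5200 IDs from the hunk (helper name illustrative):

    static bool
    example_is_zotac_fx5200(struct nvif_device *device)
    {
            return nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x1035) ||
                   nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x2035);
    }
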
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 7b331543a41b..225894cdcac2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -130,14 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr32(device, reg, val);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_wr32(device, reg, val);
}
static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
- return nv_rd32(device, reg);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ return nvif_rd32(device, reg);
}
static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index b13f441c6431..615714c1727d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,16 +21,10 @@
*
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/device.h>
-#include <core/class.h>
-#include <core/mm.h>
-
-#include <subdev/fb.h>
-#include <subdev/timer.h>
-#include <subdev/instmem.h>
-#include <engine/graph.h>
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/ioctl.h>
+#include <nvif/class.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -47,20 +41,20 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
struct nouveau_abi16 *abi16;
cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
if (cli->abi16) {
+ struct nv_device_v0 args = {
+ .device = ~0ULL,
+ };
+
INIT_LIST_HEAD(&abi16->channels);
- abi16->client = nv_object(cli);
/* allocate device object targeting client's default
* device (ie. the one that belongs to the fd it
* opened)
*/
- if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
- NVDRM_DEVICE, 0x0080,
- &(struct nv_device_class) {
- .device = ~0ULL,
- },
- sizeof(struct nv_device_class),
- &abi16->device) == 0)
+ if (nvif_device_init(&cli->base.base, NULL,
+ NOUVEAU_ABI16_DEVICE, NV_DEVICE,
+ &args, sizeof(args),
+ &abi16->device) == 0)
return cli->abi16;
kfree(cli->abi16);
@@ -75,7 +69,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
- struct nouveau_cli *cli = (void *)abi16->client;
+ struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
mutex_unlock(&cli->mutex);
return ret;
}
@@ -83,21 +77,19 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
u16
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
- switch (nv_device(drm->device)->card_type) {
- case NV_04:
+ switch (drm->device.info.family) {
+ case NV_DEVICE_INFO_V0_TNT:
return 0x006e;
- case NV_10:
- case NV_11:
- case NV_20:
- case NV_30:
- case NV_40:
+ case NV_DEVICE_INFO_V0_CELSIUS:
+ case NV_DEVICE_INFO_V0_KELVIN:
+ case NV_DEVICE_INFO_V0_RANKINE:
+ case NV_DEVICE_INFO_V0_CURIE:
return 0x016e;
- case NV_50:
+ case NV_DEVICE_INFO_V0_TESLA:
return 0x506e;
- case NV_C0:
- case NV_D0:
- case NV_E0:
- case GM100:
+ case NV_DEVICE_INFO_V0_FERMI:
+ case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
return 0x906e;
}
@@ -140,7 +132,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
/* destroy channel object, all children will be killed too */
if (chan->chan) {
- abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff));
+ abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
nouveau_channel_del(&chan->chan);
}
@@ -151,7 +143,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
- struct nouveau_cli *cli = (void *)abi16->client;
+ struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
struct nouveau_abi16_chan *chan, *temp;
/* cleanup channels */
@@ -160,7 +152,7 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
}
/* destroy the device object */
- nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
+ nvif_device_fini(&abi16->device);
kfree(cli->abi16);
cli->abi16 = NULL;
@@ -169,30 +161,31 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_timer *ptimer = nouveau_timer(device);
- struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_timer *ptimer = nvkm_timer(device);
+ struct nouveau_graph *graph = nvkm_gr(device);
struct drm_nouveau_getparam *getparam = data;
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
- getparam->value = device->chipset;
+ getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- if (nv_device_is_pci(device))
+ if (nv_device_is_pci(nvkm_device(device)))
getparam->value = dev->pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- if (nv_device_is_pci(device))
+ if (nv_device_is_pci(nvkm_device(device)))
getparam->value = dev->pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (!nv_device_is_pci(device))
+ if (!nv_device_is_pci(nvkm_device(device)))
getparam->value = 3;
else
if (drm_pci_device_is_agp(dev))
@@ -225,7 +218,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = graph->units ? graph->units(graph) : 0;
break;
default:
- nv_debug(device, "unknown parameter %lld\n", getparam->param);
+ NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
@@ -246,10 +239,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
- struct nouveau_client *client;
- struct nouveau_device *device;
- struct nouveau_instmem *imem;
- struct nouveau_fb *pfb;
+ struct nvif_device *device;
int ret;
if (unlikely(!abi16))
@@ -258,21 +248,18 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (!drm->channel)
return nouveau_abi16_put(abi16, -ENODEV);
- client = nv_client(abi16->client);
- device = nv_device(abi16->device);
- imem = nouveau_instmem(device);
- pfb = nouveau_fb(device);
+ device = &abi16->device;
/* hack to allow channel engine type specification on kepler */
- if (device->card_type >= NV_E0) {
+ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle != ~0)
- init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+ init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
else
init->fb_ctxdma_handle = init->tt_ctxdma_handle;
/* allow flips to be executed if this is a graphics channel */
init->tt_ctxdma_handle = 0;
- if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+ if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR)
init->tt_ctxdma_handle = 1;
}
@@ -293,13 +280,14 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
abi16->handles |= (1ULL << init->channel);
/* create channel object and initialise dma and fence management */
- ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
- init->channel, init->fb_ctxdma_handle,
+ ret = nouveau_channel_new(drm, device,
+ NOUVEAU_ABI16_CHAN(init->channel),
+ init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
if (ret)
goto done;
- if (device->card_type >= NV_50)
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else
@@ -308,10 +296,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- if (device->card_type < NV_10) {
+ if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
init->subchan[0].handle = 0x00000000;
init->subchan[0].grclass = 0x0000;
- init->subchan[1].handle = NvSw;
+ init->subchan[1].handle = chan->chan->nvsw.handle;
init->subchan[1].grclass = 0x506e;
init->nr_subchan = 2;
}
@@ -324,8 +312,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- if (device->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
&chan->ntfy_vma);
if (ret)
goto done;
@@ -343,6 +331,18 @@ done:
return nouveau_abi16_put(abi16, ret);
}
+static struct nouveau_abi16_chan *
+nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
+{
+ struct nouveau_abi16_chan *chan;
+
+ list_for_each_entry(chan, &abi16->channels, head) {
+ if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
+ return chan;
+ }
+
+ return NULL;
+}
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
@@ -350,28 +350,38 @@ nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
struct drm_nouveau_channel_free *req = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
- int ret = -ENOENT;
if (unlikely(!abi16))
return -ENOMEM;
- list_for_each_entry(chan, &abi16->channels, head) {
- if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
- nouveau_abi16_chan_fini(abi16, chan);
- return nouveau_abi16_put(abi16, 0);
- }
- }
-
- return nouveau_abi16_put(abi16, ret);
+ chan = nouveau_abi16_chan(abi16, req->channel);
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
+ nouveau_abi16_chan_fini(abi16, chan);
+ return nouveau_abi16_put(abi16, 0);
}
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_grobj_alloc *init = data;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+ } args = {
+ .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
+ .ioctl.type = NVIF_IOCTL_V0_NEW,
+ .ioctl.path_nr = 3,
+ .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
+ .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
+ .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
+ .new.route = NVDRM_OBJECT_ABI16,
+ .new.handle = init->handle,
+ .new.oclass = init->class,
+ };
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_object *object;
+ struct nvif_client *client;
int ret;
if (unlikely(!abi16))
@@ -379,6 +389,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
if (init->handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
+ client = nvif_client(nvif_object(&abi16->device));
/* compatibility with userspace that assumes 506e for all chipsets */
if (init->class == 0x506e) {
@@ -387,8 +398,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, 0);
}
- ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
- init->handle, init->class, NULL, 0, &object);
+ ret = nvif_client_ioctl(client, &args, sizeof(args));
return nouveau_abi16_put(abi16, ret);
}
@@ -396,29 +406,38 @@ int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_notifierobj_alloc *info = data;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+ struct nv_dma_v0 ctxdma;
+ } args = {
+ .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
+ .ioctl.type = NVIF_IOCTL_V0_NEW,
+ .ioctl.path_nr = 3,
+ .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
+ .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
+ .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
+ .new.route = NVDRM_OBJECT_ABI16,
+ .new.handle = info->handle,
+ .new.oclass = NV_DMA_IN_MEMORY,
+ };
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan = NULL, *temp;
+ struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nouveau_object *object;
- struct nv_dma_class args = {};
+ struct nvif_device *device = &abi16->device;
+ struct nvif_client *client;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
/* completely unnecessary for these chipsets... */
- if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
+ if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
return nouveau_abi16_put(abi16, -EINVAL);
+ client = nvif_client(nvif_object(&abi16->device));
- list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
- chan = temp;
- break;
- }
- }
-
+ chan = nouveau_abi16_chan(abi16, info->channel);
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
@@ -434,26 +453,29 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- args.start = ntfy->node->offset;
- args.limit = ntfy->node->offset + ntfy->node->length - 1;
- if (device->card_type >= NV_50) {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
- args.start += chan->ntfy_vma.offset;
- args.limit += chan->ntfy_vma.offset;
+ args.ctxdma.start = ntfy->node->offset;
+ args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ args.ctxdma.target = NV_DMA_V0_TARGET_VM;
+ args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
+ args.ctxdma.start += chan->ntfy_vma.offset;
+ args.ctxdma.limit += chan->ntfy_vma.offset;
} else
if (drm->agp.stat == ENABLED) {
- args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
- args.start += drm->agp.base + chan->ntfy->bo.offset;
- args.limit += drm->agp.base + chan->ntfy->bo.offset;
+ args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
+ args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
+ args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
+ args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
+ client->super = true;
} else {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
- args.start += chan->ntfy->bo.offset;
- args.limit += chan->ntfy->bo.offset;
+ args.ctxdma.target = NV_DMA_V0_TARGET_VM;
+ args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
+ args.ctxdma.start += chan->ntfy->bo.offset;
+ args.ctxdma.limit += chan->ntfy->bo.offset;
}
- ret = nouveau_object_new(abi16->client, chan->chan->handle,
- ntfy->handle, 0x003d, &args,
- sizeof(args), &object);
+ ret = nvif_client_ioctl(client, &args, sizeof(args));
+ client->super = false;
if (ret)
goto done;
@@ -469,28 +491,36 @@ int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_gpuobj_free *fini = data;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_del del;
+ } args = {
+ .ioctl.owner = NVDRM_OBJECT_ABI16,
+ .ioctl.type = NVIF_IOCTL_V0_DEL,
+ .ioctl.path_nr = 4,
+ .ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
+ .ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
+ .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
+ .ioctl.path[0] = fini->handle,
+ };
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan = NULL, *temp;
+ struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
+ struct nvif_client *client;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
- list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
- chan = temp;
- break;
- }
- }
-
+ chan = nouveau_abi16_chan(abi16, fini->channel);
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
+ client = nvif_client(nvif_object(&abi16->device));
/* synchronize with the user channel and destroy the gpu object */
nouveau_channel_idle(chan->chan);
- ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
+ ret = nvif_client_ioctl(client, &args, sizeof(args));
if (ret)
return nouveau_abi16_put(abi16, ret);
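/*
 * [annotation, not part of the patch] A minimal sketch of the raw NVIF ioctl
 * convention the reworked entry points above rely on.  The object path is
 * stored leaf-first: path[0] names the immediate parent of the object being
 * created, path[path_nr - 1] the client.  The helper name is hypothetical;
 * the structures and constants are the ones used in the hunks above.
 */
static int
example_abi16_object_new(struct nvif_client *client, int channel,
			 u32 handle, u32 oclass)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} args = {
		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
		.ioctl.type = NVIF_IOCTL_V0_NEW,
		.ioctl.path_nr = 3,
		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,        /* root */
		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(channel), /* parent */
		.new.route = NVDRM_OBJECT_ABI16,
		.new.handle = handle,
		.new.oclass = oclass,
	};

	return nvif_client_ioctl(client, &args, sizeof(args));
}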
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 90004081a501..39844e6bfbff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -28,8 +28,7 @@ struct nouveau_abi16_chan {
};
struct nouveau_abi16 {
- struct nouveau_object *client;
- struct nouveau_object *device;
+ struct nvif_device device;
struct list_head channels;
u64 handles;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 51666daddb94..1f6f6ba6847a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -1,7 +1,5 @@
#include <linux/module.h>
-#include <core/device.h>
-
#include "nouveau_drm.h"
#include "nouveau_agp.h"
#include "nouveau_reg.h"
@@ -29,7 +27,7 @@ static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
static unsigned long
get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
{
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
int agpmode = nouveau_agpmode;
unsigned long mode = info->mode;
@@ -38,7 +36,7 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
* FW seems to be broken on nv18, it makes the card lock up
* randomly.
*/
- if (device->chipset == 0x18)
+ if (device->info.chipset == 0x18)
mode &= ~PCI_AGP_COMMAND_FW;
/*
@@ -47,10 +45,10 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
while (agpmode == -1 && quirk->hostbridge_vendor) {
if (info->id_vendor == quirk->hostbridge_vendor &&
info->id_device == quirk->hostbridge_device &&
- device->pdev->vendor == quirk->chip_vendor &&
- device->pdev->device == quirk->chip_device) {
+ nvkm_device(device)->pdev->vendor == quirk->chip_vendor &&
+ nvkm_device(device)->pdev->device == quirk->chip_device) {
agpmode = quirk->mode;
- nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n",
+ NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
agpmode);
break;
}
@@ -104,7 +102,7 @@ void
nouveau_agp_reset(struct nouveau_drm *drm)
{
#if __OS_HAS_AGP
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct drm_device *dev = drm->dev;
u32 save[2];
int ret;
@@ -115,7 +113,7 @@ nouveau_agp_reset(struct nouveau_drm *drm)
/* First of all, disable fast writes, otherwise if it's
* already enabled in the AGP bridge and we disable the card's
* AGP controller we might be locking ourselves out of it. */
- if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) |
+ if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
dev->agp->mode) & PCI_AGP_COMMAND_FW) {
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -134,15 +132,15 @@ nouveau_agp_reset(struct nouveau_drm *drm)
/* clear busmaster bit, and disable AGP */
- save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
- nv_wr32(device, NV04_PBUS_PCI_NV_19, 0);
+ save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
+ nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
/* reset PGRAPH, PFIFO and PTIMER */
- save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000);
- nv_mask(device, 0x000200, 0x00011100, save[1]);
+ save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
+ nvif_mask(device, 0x000200, 0x00011100, save[1]);
/* and restore busmaster bit (gives effect of resetting AGP) */
- nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
+ nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
#endif
}
@@ -150,7 +148,6 @@ void
nouveau_agp_init(struct nouveau_drm *drm)
{
#if __OS_HAS_AGP
- struct nouveau_device *device = nv_device(drm->device);
struct drm_device *dev = drm->dev;
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -162,13 +159,13 @@ nouveau_agp_init(struct nouveau_drm *drm)
ret = drm_agp_acquire(dev);
if (ret) {
- nv_error(device, "unable to acquire AGP: %d\n", ret);
+ NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
return;
}
ret = drm_agp_info(dev, &info);
if (ret) {
- nv_error(device, "unable to get AGP info: %d\n", ret);
+ NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
return;
}
@@ -177,7 +174,7 @@ nouveau_agp_init(struct nouveau_drm *drm)
ret = drm_agp_enable(dev, mode);
if (ret) {
- nv_error(device, "unable to enable AGP: %d\n", ret);
+ NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
return;
}
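/*
 * [annotation, not part of the patch] Register access in this file moves from
 * the nouveau_device accessors (nv_rd32/nv_wr32/nv_mask) to the nvif_device
 * equivalents.  A sketch of the read-modify-write semantics assumed of
 * nvif_mask(), consistent with nouveau_agp_reset() above saving the previous
 * value in save[0]/save[1] and writing it back later:
 */
static inline u32
example_mask(struct nvif_device *device, u32 addr, u32 mask, u32 data)
{
	u32 old = nvif_rd32(device, addr);

	nvif_wr32(device, addr, (old & ~mask) | data);
	return old; /* previous value, so the caller can restore it */
}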
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2c1e4aad7da3..e566c5b53651 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,8 +40,8 @@ static int
nv40_get_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nouveau_device *device = nv_device(drm->device);
- int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) &
+ struct nvif_device *device = &drm->device;
+ int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
NV40_PMC_BACKLIGHT_MASK) >> 16;
return val;
@@ -51,11 +51,11 @@ static int
nv40_set_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int val = bd->props.brightness;
- int reg = nv_rd32(device, NV40_PMC_BACKLIGHT);
+ int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
- nv_wr32(device, NV40_PMC_BACKLIGHT,
+ nvif_wr32(device, NV40_PMC_BACKLIGHT,
(val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
return 0;
@@ -71,11 +71,11 @@ static int
nv40_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct backlight_properties props;
struct backlight_device *bd;
- if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
+ if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return 0;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -97,12 +97,12 @@ nv50_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div = 1025;
u32 val;
- val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
+ val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
val &= NV50_PDISP_SOR_PWM_CTL_VAL;
return ((val * 100) + (div / 2)) / div;
}
@@ -112,12 +112,12 @@ nv50_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div = 1025;
u32 val = (bd->props.brightness * div) / 100;
- nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
+ nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
NV50_PDISP_SOR_PWM_CTL_NEW | val);
return 0;
}
@@ -133,12 +133,12 @@ nva3_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div, val;
- div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
- val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
+ div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
+ val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
if (div && div >= val)
return ((val * 100) + (div / 2)) / div;
@@ -151,14 +151,14 @@ nva3_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div, val;
- div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
+ div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
val = (bd->props.brightness * div) / 100;
if (div) {
- nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
+ nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
NV50_PDISP_SOR_PWM_CTL_NEW |
NVA3_PDISP_SOR_PWM_CTL_UNK);
return 0;
@@ -177,7 +177,7 @@ static int
nv50_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nouveau_encoder *nv_encoder;
struct backlight_properties props;
struct backlight_device *bd;
@@ -190,12 +190,12 @@ nv50_backlight_init(struct drm_connector *connector)
return -ENODEV;
}
- if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+ if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
return 0;
- if (device->chipset <= 0xa0 ||
- device->chipset == 0xaa ||
- device->chipset == 0xac)
+ if (device->info.chipset <= 0xa0 ||
+ device->info.chipset == 0xaa ||
+ device->info.chipset == 0xac)
ops = &nv50_bl_ops;
else
ops = &nva3_bl_ops;
@@ -218,7 +218,7 @@ int
nouveau_backlight_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -226,13 +226,12 @@ nouveau_backlight_init(struct drm_device *dev)
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
- switch (device->card_type) {
- case NV_40:
+ switch (device->info.family) {
+ case NV_DEVICE_INFO_V0_CURIE:
return nv40_backlight_init(connector);
- case NV_50:
- case NV_C0:
- case NV_D0:
- case NV_E0:
+ case NV_DEVICE_INFO_V0_TESLA:
+ case NV_DEVICE_INFO_V0_FERMI:
+ case NV_DEVICE_INFO_V0_KEPLER:
return nv50_backlight_init(connector);
default:
break;
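/*
 * [annotation, not part of the patch] A worked example of the rounding in the
 * duty-cycle conversions above.  With div = 1025 and a raw PWM value of 512:
 *
 *   ((512 * 100) + (1025 / 2)) / 1025 = (51200 + 512) / 1025 = 50
 *
 * i.e. 50% brightness, rounded to the nearest percent; the inverse mapping
 * (bd->props.brightness * div) / 100 takes 50 back to 512.
 */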
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8268a4ccac15..dae2c96deef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,8 +22,6 @@
* SOFTWARE.
*/
-#include <subdev/bios.h>
-
#include <drm/drmP.h>
#include "nouveau_drm.h"
@@ -217,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nvbios *bios = &drm->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
@@ -240,7 +238,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
NV_INFO(drm, "Calling LVDS script %d:\n", script);
/* don't let script change pll->head binding */
- sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
+ sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
if (lvds_ver < 0x30)
ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
@@ -252,7 +250,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
- nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
return ret;
}
@@ -320,7 +318,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
static int
get_fp_strap(struct drm_device *dev, struct nvbios *bios)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
/*
* The fp strap is normally dictated by the "User Strap" in
@@ -334,10 +332,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
- if (device->card_type >= NV_50)
- return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
+ return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
- return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+ return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
}
static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
@@ -636,7 +634,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nvbios *bios = &drm->vbios;
int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
@@ -670,7 +668,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
}
/* don't let script change pll->head binding */
- sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
+ sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
@@ -1253,7 +1251,7 @@ olddcb_table(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
u8 *dcb = NULL;
- if (nv_device(drm->device)->card_type > NV_04)
+ if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT)
dcb = ROMPTR(dev, drm->vbios.data[0x36]);
if (!dcb) {
NV_WARN(drm, "No DCB data found in VBIOS\n");
@@ -1399,6 +1397,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_output *entry)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ int link = 0;
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
@@ -1444,6 +1443,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
if (conf & 0x4)
entry->lvdsconf.use_power_scripts = true;
entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
+ link = entry->lvdsconf.sor.link;
}
if (conf & mask) {
/*
@@ -1492,17 +1492,18 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->dpconf.link_nr = 1;
break;
}
+ link = entry->dpconf.sor.link;
break;
case DCB_OUTPUT_TMDS:
if (dcb->version >= 0x40) {
entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
entry->extdev = (conf & 0x0000ff00) >> 8;
+ link = entry->tmdsconf.sor.link;
}
else if (dcb->version >= 0x30)
entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
else if (dcb->version >= 0x22)
entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
-
break;
case DCB_OUTPUT_EOL:
/* weird g80 mobile type that "nv" treats as a terminator */
@@ -1526,6 +1527,8 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
if (conf & 0x100000)
entry->i2c_upper_default = true;
+ entry->hasht = (entry->location << 4) | entry->type;
+ entry->hashm = (entry->heads << 8) | (link << 6) | entry->or;
return true;
}
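/*
 * [annotation, not part of the patch] The bit positions in the new
 * hasht/hashm match values follow directly from the expressions above:
 *
 *   hasht: [7:4] entry->location, [3:0] entry->type
 *   hashm: [n:8] entry->heads, [7:6] link, [5:0] entry->or
 *
 * Only the shift positions are asserted here; the field widths are whatever
 * the DCB supplies.
 */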
@@ -1908,7 +1911,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
uint8_t bytes_to_write;
uint16_t hwsq_entry_offset;
int i;
@@ -1931,15 +1934,15 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
/* set sequencer control */
- nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+ nvif_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
bytes_to_write -= 4;
/* write ucode */
for (i = 0; i < bytes_to_write; i += 4)
- nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+ nvif_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
/* twiddle NV_PBUS_DEBUG_4 */
- nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
+ nvif_wr32(device, NV_PBUS_DEBUG_4, nvif_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
return 0;
}
@@ -2002,7 +2005,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bios *bios = nouveau_bios(drm->device);
+ struct nouveau_bios *bios = nvkm_bios(&drm->device);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
@@ -2054,7 +2057,7 @@ nouveau_bios_posted(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
unsigned htotal;
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return true;
htotal = NVReadVgaCrtc(dev, 0, 0x06);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ba29a701ca1d..eea74b127b03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,13 +27,9 @@
* Jeremy Kolb <jkolb@brandeis.edu>
*/
-#include <core/engine.h>
+#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -52,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_fb_tile *tile = &pfb->tile.region[i];
struct nouveau_engine *engine;
@@ -92,13 +88,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
- struct nouveau_fence *fence)
+ struct fence *fence)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (tile) {
spin_lock(&drm->tile.lock);
- tile->fence = nouveau_fence_ref(fence);
+ tile->fence = (struct nouveau_fence *)fence_get(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -109,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -153,23 +149,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
- if (device->card_type < NV_50) {
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->tile_mode) {
- if (device->chipset >= 0x40) {
+ if (device->info.chipset >= 0x40) {
*align = 65536;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x30) {
+ } else if (device->info.chipset >= 0x30) {
*align = 32768;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x20) {
+ } else if (device->info.chipset >= 0x20) {
*align = 16384;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x10) {
+ } else if (device->info.chipset >= 0x10) {
*align = 16384;
*size = roundup(*size, 32 * nvbo->tile_mode);
}
@@ -196,12 +192,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
int lpg_shift = 12;
int max_size;
- if (drm->client.base.vm)
- lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ if (drm->client.vm)
+ lpg_shift = drm->client.vm->vmm->lpg_shift;
max_size = INT_MAX & ~((1 << lpg_shift) - 1);
if (size <= 0 || size > max_size) {
- nv_warn(drm, "skipped size %x\n", (u32)size);
+ NV_WARN(drm, "skipped size %x\n", (u32)size);
return -EINVAL;
}
@@ -219,9 +215,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->page_shift = 12;
- if (drm->client.base.vm) {
+ if (drm->client.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+ nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
}
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -245,27 +241,26 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
}
static void
-set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
*n = 0;
if (type & TTM_PL_FLAG_VRAM)
- pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
+ pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
if (type & TTM_PL_FLAG_TT)
- pl[(*n)++] = TTM_PL_FLAG_TT | flags;
+ pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
if (type & TTM_PL_FLAG_SYSTEM)
- pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
+ pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
- u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
+ u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
+ unsigned i, fpfn, lpfn;
- if ((nv_device(drm->device)->card_type == NV_10 ||
- nv_device(drm->device)->card_type == NV_11) &&
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@@ -275,11 +270,19 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
* at the same time.
*/
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
- nvbo->placement.fpfn = vram_pages / 2;
- nvbo->placement.lpfn = ~0;
+ fpfn = vram_pages / 2;
+ lpfn = ~0;
} else {
- nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = vram_pages / 2;
+ fpfn = 0;
+ lpfn = vram_pages / 2;
+ }
+ for (i = 0; i < nvbo->placement.num_placement; ++i) {
+ nvbo->placements[i].fpfn = fpfn;
+ nvbo->placements[i].lpfn = lpfn;
+ }
+ for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+ nvbo->busy_placements[i].fpfn = fpfn;
+ nvbo->busy_placements[i].lpfn = lpfn;
}
}
}
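/*
 * [annotation, not part of the patch] These hunks track the TTM API change
 * that moved the page-frame window (fpfn/lpfn) from struct ttm_placement into
 * each struct ttm_place, so a range restriction now has to be applied to every
 * placement entry individually.  A minimal sketch under that layout, with a
 * hypothetical helper name, of pinning a buffer to the first half of VRAM:
 */
static void
example_restrict_low_vram(struct nouveau_bo *nvbo, u32 vram_pages)
{
	unsigned i;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = vram_pages / 2;
	}
	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = vram_pages / 2;
	}
}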
@@ -500,21 +503,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- if (nv_device(drm->device)->card_type >= NV_50) {
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ /* Some BARs do not support being ioremapped WC */
+ if (nvkm_bar(&drm->device)->iomap_uncached) {
+ man->available_caching = TTM_PL_FLAG_UNCACHED;
+ man->default_caching = TTM_PL_FLAG_UNCACHED;
+ }
+
man->func = &nouveau_vram_manager;
man->io_reserve_fastpath = false;
man->use_io_reserve_lru = true;
} else {
man->func = &ttm_bo_manager_func;
}
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
man->func = &nouveau_gart_manager;
else
if (drm->agp.stat != ENABLED)
@@ -763,9 +773,9 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, NvDmaFB);
- OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, chan->drm->ntfy.handle);
+ OUT_RING (chan, chan->vram.handle);
+ OUT_RING (chan, chan->vram.handle);
}
return ret;
@@ -852,7 +862,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
- OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, chan->drm->ntfy.handle);
}
return ret;
@@ -864,7 +874,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
{
if (mem->mem_type == TTM_PL_TT)
return NvDmaTT;
- return NvDmaFB;
+ return chan->vram.handle;
}
static int
@@ -922,12 +932,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
u64 size = (u64)mem->num_pages << PAGE_SHIFT;
int ret;
- ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+ ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[0]);
if (ret)
return ret;
- ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+ ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[1]);
if (ret) {
nouveau_vm_put(&old_node->vma[0]);
@@ -945,6 +955,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
+ struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nouveau_fence *fence;
int ret;
@@ -952,20 +963,21 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_move_prep(drm, bo, new_mem);
if (ret)
return ret;
}
- mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
- ret = nouveau_fence_sync(bo->sync_obj, chan);
+ mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+ ret = nouveau_fence_sync(nouveau_bo(bo), chan, true);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
- ret = ttm_bo_move_accel_cleanup(bo, fence,
+ ret = ttm_bo_move_accel_cleanup(bo,
+ &fence->base,
evict,
no_wait_gpu,
new_mem);
@@ -973,7 +985,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
}
}
}
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
return ret;
}
@@ -1005,9 +1017,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
int ret;
do {
- struct nouveau_object *object;
struct nouveau_channel *chan;
- u32 handle = (mthd->engine << 16) | mthd->oclass;
if (mthd->engine)
chan = drm->cechan;
@@ -1016,13 +1026,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
if (chan == NULL)
continue;
- ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
- mthd->oclass, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL,
+ mthd->oclass | (mthd->engine << 16),
+ mthd->oclass, NULL, 0,
+ &drm->ttm.copy);
if (ret == 0) {
- ret = mthd->init(chan, handle);
+ ret = mthd->init(chan, drm->ttm.copy.handle);
if (ret) {
- nouveau_object_del(nv_object(drm),
- chan->handle, handle);
+ nvif_object_fini(&drm->ttm.copy);
continue;
}
@@ -1040,12 +1051,15 @@ static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
- u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ struct ttm_place placement_memtype = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+ };
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
int ret;
- placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
placement.placement = placement.busy_placement = &placement_memtype;
@@ -1073,12 +1087,15 @@ static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
- u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ struct ttm_place placement_memtype = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+ };
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
int ret;
- placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
placement.placement = placement.busy_placement = &placement_memtype;
@@ -1135,7 +1152,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -1151,8 +1168,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
+ struct fence *fence = reservation_object_get_excl(bo->resv);
- nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
+ nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
}
@@ -1166,7 +1184,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
@@ -1196,14 +1214,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
}
/* Fallback to software copy. */
- spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
- spin_unlock(&bo->bdev->fence_lock);
if (ret == 0)
ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@@ -1227,7 +1243,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_mem *node = mem->mm_node;
- struct drm_device *dev = drm->dev;
int ret;
mem->bus.addr = NULL;
@@ -1246,19 +1261,19 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = drm->agp.base;
- mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+ mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
}
#endif
- if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
+ mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
mem->bus.is_iomem = true;
- if (nv_device(drm->device)->card_type >= NV_50) {
- struct nouveau_bar *bar = nouveau_bar(drm->device);
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ struct nouveau_bar *bar = nvkm_bar(&drm->device);
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
@@ -1278,7 +1293,7 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_bar *bar = nouveau_bar(drm->device);
+ struct nouveau_bar *bar = nvkm_bar(&drm->device);
struct nouveau_mem *node = mem->mm_node;
if (!node->bar_vma.node)
@@ -1292,15 +1307,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_device *device = nv_device(drm->device);
- u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
- int ret;
+ struct nvif_device *device = &drm->device;
+ u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
+ int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (nv_device(drm->device)->card_type < NV_50 ||
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nouveau_bo_tile_layout(nvbo))
return 0;
@@ -1315,13 +1330,20 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
/* make sure bo is in mappable vram */
- if (nv_device(drm->device)->card_type >= NV_50 ||
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
bo->mem.start + bo->mem.num_pages < mappable)
return 0;
+ for (i = 0; i < nvbo->placement.num_placement; ++i) {
+ nvbo->placements[i].fpfn = 0;
+ nvbo->placements[i].lpfn = mappable;
+ }
+
+ for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+ nvbo->busy_placements[i].fpfn = 0;
+ nvbo->busy_placements[i].lpfn = mappable;
+ }
- nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
return nouveau_bo_validate(nvbo, false, false);
}
@@ -1333,6 +1355,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
struct nouveau_drm *drm;
struct nouveau_device *device;
struct drm_device *dev;
+ struct device *pdev;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1349,8 +1372,9 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
- device = nv_device(drm->device);
+ device = nvkm_device(&drm->device);
dev = drm->dev;
+ pdev = nv_device_base(device);
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
@@ -1370,17 +1394,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
for (i = 0; i < ttm->num_pages; i++) {
- ttm_dma->dma_address[i] = nv_device_map_page(device,
- ttm->pages[i]);
- if (!ttm_dma->dma_address[i]) {
+ dma_addr_t addr;
+
+ addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(pdev, addr)) {
while (--i) {
- nv_device_unmap_page(device,
- ttm_dma->dma_address[i]);
+ dma_unmap_page(pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
ttm_dma->dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
return -EFAULT;
}
+
+ ttm_dma->dma_address[i] = addr;
}
return 0;
}
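/*
 * [annotation, not part of the patch] Page mapping now goes through the
 * generic DMA API rather than the nv_device_map_page()/nv_device_unmap_page()
 * wrappers.  The usual pairing, as in the hunk above (hypothetical helper):
 * every successful dma_map_page() must be checked with dma_mapping_error()
 * before use, and released by dma_unmap_page() with identical size and
 * direction arguments.
 */
static int
example_map_one(struct device *dev, struct page *page, dma_addr_t *addr)
{
	*addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *addr))
		return -EFAULT;
	/* later: dma_unmap_page(dev, *addr, PAGE_SIZE, DMA_BIDIRECTIONAL); */
	return 0;
}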
@@ -1392,6 +1421,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct nouveau_drm *drm;
struct nouveau_device *device;
struct drm_device *dev;
+ struct device *pdev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1399,8 +1429,9 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
- device = nv_device(drm->device);
+ device = nvkm_device(&drm->device);
dev = drm->dev;
+ pdev = nv_device_base(device);
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
@@ -1418,7 +1449,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) {
- nv_device_unmap_page(device, ttm_dma->dma_address[i]);
+ dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
}
@@ -1426,47 +1458,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
void
-nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
-{
- struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
- struct nouveau_fence *old_fence = NULL;
-
- spin_lock(&nvbo->bo.bdev->fence_lock);
- old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = new_fence;
- spin_unlock(&nvbo->bo.bdev->fence_lock);
-
- nouveau_fence_unref(&old_fence);
-}
-
-static void
-nouveau_bo_fence_unref(void **sync_obj)
-{
- nouveau_fence_unref((struct nouveau_fence **)sync_obj);
-}
-
-static void *
-nouveau_bo_fence_ref(void *sync_obj)
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
- return nouveau_fence_ref(sync_obj);
-}
-
-static bool
-nouveau_bo_fence_signalled(void *sync_obj)
-{
- return nouveau_fence_done(sync_obj);
-}
+ struct reservation_object *resv = nvbo->bo.resv;
-static int
-nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
-{
- return nouveau_fence_wait(sync_obj, lazy, intr);
-}
-
-static int
-nouveau_bo_fence_flush(void *sync_obj)
-{
- return 0;
+ if (exclusive)
+ reservation_object_add_excl_fence(resv, &fence->base);
+ else if (fence)
+ reservation_object_add_shared_fence(resv, &fence->base);
}
struct ttm_bo_driver nouveau_bo_driver = {
@@ -1479,11 +1478,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = nouveau_bo_fence_signalled,
- .sync_obj_wait = nouveau_bo_fence_wait,
- .sync_obj_flush = nouveau_bo_fence_flush,
- .sync_obj_unref = nouveau_bo_fence_unref,
- .sync_obj_ref = nouveau_bo_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
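/*
 * [annotation, not part of the patch] With the sync_obj driver hooks removed,
 * fence state lives in the buffer's reservation_object and nouveau_bo_fence()
 * gains an 'exclusive' flag.  As far as the hunks above imply: a write
 * attaches an exclusive fence, which later users must wait on, while a read
 * attaches a shared fence, letting other readers overlap.  For example, after
 * submitting a copy from old_bo to new_bo:
 *
 *   nouveau_bo_fence(new_bo, fence, true);   // written: exclusive
 *   nouveau_bo_fence(old_bo, fence, false);  // only read: shared
 */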
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ff17c1f432fc..ae95b2d43b36 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -9,8 +9,8 @@ struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
u32 valid_domains;
- u32 placements[3];
- u32 busy_placements[3];
+ struct ttm_place placements[3];
+ struct ttm_place busy_placements[3];
struct ttm_bo_kmap_obj kmap;
struct list_head head;
@@ -78,7 +78,7 @@ u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
-void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index ccb6b452d6d0..99cd9e4a2aa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -22,16 +22,11 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/device.h>
-#include <core/class.h>
-
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/instmem.h>
+#include <nvif/os.h>
+#include <nvif/class.h>
-#include <engine/software.h>
+/*XXX*/
+#include <core/client.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -47,7 +42,7 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = chan->cli;
+ struct nouveau_cli *cli = (void *)nvif_client(chan->object);
struct nouveau_fence *fence = NULL;
int ret;
@@ -58,8 +53,8 @@ nouveau_channel_idle(struct nouveau_channel *chan)
}
if (ret)
- NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
- chan->handle, cli->base.name);
+ NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
+ chan->object->handle, nvkm_client(&cli->base)->name);
return ret;
}
@@ -68,36 +63,34 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
- struct nouveau_object *client = nv_object(chan->cli);
if (chan->fence) {
nouveau_channel_idle(chan);
nouveau_fence(chan->drm)->context_del(chan);
}
- nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
- nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
+ nvif_object_fini(&chan->nvsw);
+ nvif_object_fini(&chan->gart);
+ nvif_object_fini(&chan->vram);
+ nvif_object_ref(NULL, &chan->object);
+ nvif_object_fini(&chan->push.ctxdma);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->pin_refcnt)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
+ nvif_device_ref(NULL, &chan->device);
kfree(chan);
}
*pchan = NULL;
}
static int
-nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, u32 size,
- struct nouveau_channel **pchan)
+nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, u32 size, struct nouveau_channel **pchan)
{
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_instmem *imem = nouveau_instmem(device);
- struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct nouveau_client *client = &cli->base;
- struct nv_dma_class args = {};
+ struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+ struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
+ struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
- struct nouveau_object *push;
u32 target;
int ret;
@@ -105,9 +98,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
if (!chan)
return -ENOMEM;
- chan->cli = cli;
+ nvif_device_ref(device, &chan->device);
chan->drm = drm;
- chan->handle = handle;
/* allocate memory for dma push buffer */
target = TTM_PL_FLAG_TT;
@@ -132,51 +124,54 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
* we be able to call out to other (indirect) push buffers
*/
chan->push.vma.offset = chan->push.buffer->bo.offset;
- chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
- if (device->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
&chan->push.vma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = client->vm->vmm->limit - 1;
+ args.limit = cli->vm->vmm->limit - 1;
} else
if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
- u64 limit = pfb->ram->size - imem->reserved - 1;
- if (device->card_type == NV_04) {
+ if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
* nfi why this exists, it came from the -nv ddx.
*/
- args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
- args.start = nv_device_resource_start(device, 1);
- args.limit = args.start + limit;
+ args.target = NV_DMA_V0_TARGET_PCI;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
+ args.start = nv_device_resource_start(nvkm_device(device), 1);
+ args.limit = args.start + device->info.ram_user - 1;
} else {
- args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VRAM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = limit;
+ args.limit = device->info.ram_user - 1;
}
} else {
if (chan->drm->agp.stat == ENABLED) {
- args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_AGP;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = vmm->limit - 1;
}
}
- ret = nouveau_object_new(nv_object(chan->cli), parent,
- chan->push.handle, 0x0002,
- &args, sizeof(args), &push);
+ ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
+ (handle & 0xffff), NV_DMA_FROM_MEMORY,
+ &args, sizeof(args), &chan->push.ctxdma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -186,38 +181,56 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
}
static int
-nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, u32 engine,
- struct nouveau_channel **pchan)
+nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, u32 engine, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
- NVC0_CHANNEL_IND_CLASS,
- NV84_CHANNEL_IND_CLASS,
- NV50_CHANNEL_IND_CLASS,
+ static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A,
+ FERMI_CHANNEL_GPFIFO,
+ G82_CHANNEL_GPFIFO,
+ NV50_CHANNEL_GPFIFO,
0 };
const u16 *oclass = oclasses;
- struct nve0_channel_ind_class args;
+ union {
+ struct nv50_channel_gpfifo_v0 nv50;
+ struct kepler_channel_gpfifo_a_v0 kepler;
+ } args, *retn;
struct nouveau_channel *chan;
+ u32 size;
int ret;
/* allocate dma push buffer */
- ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
+ ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan);
*pchan = chan;
if (ret)
return ret;
/* create channel object */
- args.pushbuf = chan->push.handle;
- args.ioffset = 0x10000 + chan->push.vma.offset;
- args.ilength = 0x02000;
- args.engine = engine;
-
do {
- ret = nouveau_object_new(nv_object(cli), parent, handle,
- *oclass++, &args, sizeof(args),
- &chan->object);
- if (ret == 0)
+ if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
+ args.kepler.version = 0;
+ args.kepler.engine = engine;
+ args.kepler.pushbuf = chan->push.ctxdma.handle;
+ args.kepler.ilength = 0x02000;
+ args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
+ size = sizeof(args.kepler);
+ } else {
+ args.nv50.version = 0;
+ args.nv50.pushbuf = chan->push.ctxdma.handle;
+ args.nv50.ilength = 0x02000;
+ args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+ size = sizeof(args.nv50);
+ }
+
+ ret = nvif_object_new(nvif_object(device), handle, *oclass++,
+ &args, size, &chan->object);
+ if (ret == 0) {
+ retn = chan->object->data;
+ if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
+ chan->chid = retn->kepler.chid;
+ else
+ chan->chid = retn->nv50.chid;
return ret;
+ }
} while (*oclass);
nouveau_channel_del(pchan);
@@ -225,35 +238,38 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
}
static int
-nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, struct nouveau_channel **pchan)
+nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
- NV17_CHANNEL_DMA_CLASS,
- NV10_CHANNEL_DMA_CLASS,
- NV03_CHANNEL_DMA_CLASS,
+ static const u16 oclasses[] = { NV40_CHANNEL_DMA,
+ NV17_CHANNEL_DMA,
+ NV10_CHANNEL_DMA,
+ NV03_CHANNEL_DMA,
0 };
const u16 *oclass = oclasses;
- struct nv03_channel_dma_class args;
+ struct nv03_channel_dma_v0 args, *retn;
struct nouveau_channel *chan;
int ret;
/* allocate dma push buffer */
- ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
+ ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan);
*pchan = chan;
if (ret)
return ret;
/* create channel object */
- args.pushbuf = chan->push.handle;
+ args.version = 0;
+ args.pushbuf = chan->push.ctxdma.handle;
args.offset = chan->push.vma.offset;
do {
- ret = nouveau_object_new(nv_object(cli), parent, handle,
- *oclass++, &args, sizeof(args),
- &chan->object);
- if (ret == 0)
+ ret = nvif_object_new(nvif_object(device), handle, *oclass++,
+ &args, sizeof(args), &chan->object);
+ if (ret == 0) {
+ retn = chan->object->data;
+ chan->chid = retn->chid;
return ret;
+ }
} while (ret && *oclass);
nouveau_channel_del(pchan);
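/*
 * [annotation, not part of the patch] Both allocation paths above negotiate
 * the channel class by walking a newest-first oclass list and letting
 * nvif_object_new() fail until the device accepts one; the kernel's reply is
 * then read back through chan->object->data to obtain the channel id (chid),
 * which the caller no longer picks itself.  Supporting a newer channel type
 * would just mean prepending its class (hypothetical name shown):
 *
 *   static const u16 oclasses[] = { NEWER_CHANNEL_GPFIFO_B,
 *                                   KEPLER_CHANNEL_GPFIFO_A,
 *                                   FERMI_CHANNEL_GPFIFO,
 *                                   ... };
 */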
@@ -263,60 +279,63 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
- struct nouveau_client *client = nv_client(chan->cli);
- struct nouveau_device *device = nv_device(chan->drm->device);
- struct nouveau_instmem *imem = nouveau_instmem(device);
- struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nvif_device *device = chan->device;
+ struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+ struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
struct nouveau_software_chan *swch;
- struct nouveau_object *object;
- struct nv_dma_class args = {};
+ struct nv_dma_v0 args = {};
int ret, i;
+ nvif_object_map(chan->object);
+
/* allocate dma objects to cover all allowed vram, and gart */
- if (device->card_type < NV_C0) {
- if (device->card_type >= NV_50) {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = client->vm->vmm->limit - 1;
+ args.limit = cli->vm->vmm->limit - 1;
} else {
- args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VRAM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = pfb->ram->size - imem->reserved - 1;
+ args.limit = device->info.ram_user - 1;
}
- ret = nouveau_object_new(nv_object(client), chan->handle, vram,
- 0x003d, &args, sizeof(args), &object);
+ ret = nvif_object_init(chan->object, NULL, vram,
+ NV_DMA_IN_MEMORY, &args,
+ sizeof(args), &chan->vram);
if (ret)
return ret;
- if (device->card_type >= NV_50) {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = client->vm->vmm->limit - 1;
+ args.limit = cli->vm->vmm->limit - 1;
} else
if (chan->drm->agp.stat == ENABLED) {
- args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_AGP;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = vmm->limit - 1;
}
- ret = nouveau_object_new(nv_object(client), chan->handle, gart,
- 0x003d, &args, sizeof(args), &object);
+ ret = nvif_object_init(chan->object, NULL, gart,
+ NV_DMA_IN_MEMORY, &args,
+ sizeof(args), &chan->gart);
if (ret)
return ret;
-
- chan->vram = vram;
- chan->gart = gart;
}
/* initialise dma tracking parameters */
- switch (nv_hclass(chan->object) & 0x00ff) {
+ switch (chan->object->oclass & 0x00ff) {
case 0x006b:
case 0x006e:
chan->user_put = 0x40;
@@ -347,13 +366,13 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
OUT_RING(chan, 0x00000000);
/* allocate software object class (used for fences on <= nv05) */
- if (device->card_type < NV_10) {
- ret = nouveau_object_new(nv_object(client), chan->handle,
- NvSw, 0x006e, NULL, 0, &object);
+ if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
+ ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
+ NULL, 0, &chan->nvsw);
if (ret)
return ret;
- swch = (void *)object->parent;
+ swch = (void *)nvkm_object(&chan->nvsw)->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = chan;
@@ -362,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
return ret;
BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
- OUT_RING (chan, NvSw);
+ OUT_RING (chan, chan->nvsw.handle);
FIRE_RING (chan);
}
@@ -371,25 +390,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
int
-nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, u32 arg0, u32 arg1,
+nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, u32 arg0, u32 arg1,
struct nouveau_channel **pchan)
{
+ struct nouveau_cli *cli = (void *)nvif_client(&device->base);
int ret;
- ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
+ ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
if (ret) {
- NV_DEBUG(cli, "ib channel create, %d\n", ret);
- ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
+ NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
+ ret = nouveau_channel_dma(drm, device, handle, pchan);
if (ret) {
- NV_DEBUG(cli, "dma channel create, %d\n", ret);
+ NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
return ret;
}
}
ret = nouveau_channel_init(*pchan, arg0, arg1);
if (ret) {
- NV_ERROR(cli, "channel failed to initialise, %d\n", ret);
+ NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
return ret;
}
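Both channel constructors above share one pattern: a zero-terminated oclass array is walked from newest class to oldest, version-specific arguments are built per attempt, and the first class the device accepts wins. A minimal sketch of that probe loop, assuming a hypothetical try_alloc() in place of the real nvif_object_new() call and its argument setup:

	/* Sketch only: try_alloc() stands in for nvif_object_new() plus the
	 * per-class argument setup done in nouveau_channel_ind()/_dma().
	 */
	static int probe_oclass(const u16 *oclass, struct nvif_object **pobj)
	{
		int ret = -ENODEV;

		do {
			ret = try_alloc(*oclass++, pobj); /* hypothetical */
			if (ret == 0)
				return 0;   /* newest accepted class wins */
		} while (*oclass);          /* array is zero-terminated */

		return ret;
	}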
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 40f97e2c47b6..20163709d608 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,20 +1,23 @@
#ifndef __NOUVEAU_CHAN_H__
#define __NOUVEAU_CHAN_H__
-struct nouveau_cli;
+#include <nvif/object.h>
+struct nvif_device;
struct nouveau_channel {
- struct nouveau_cli *cli;
+ struct nvif_device *device;
struct nouveau_drm *drm;
- u32 handle;
- u32 vram;
- u32 gart;
+ int chid;
+
+ struct nvif_object vram;
+ struct nvif_object gart;
+ struct nvif_object nvsw;
struct {
struct nouveau_bo *buffer;
struct nouveau_vma vma;
- u32 handle;
+ struct nvif_object ctxdma;
} push;
/* TODO: this will be reworked in the near future */
@@ -34,12 +37,12 @@ struct nouveau_channel {
u32 user_get;
u32 user_put;
- struct nouveau_object *object;
+ struct nvif_object *object;
};
-int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
- u32 parent, u32 handle, u32 arg0, u32 arg1,
+int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
+ u32 handle, u32 arg0, u32 arg1,
struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index dbdc9ad59546..1ec44c83e919 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -42,9 +42,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
-#include <subdev/i2c.h>
-#include <subdev/gpio.h>
-#include <engine/disp.h>
+#include <nvif/event.h>
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
static int nouveau_tv_disable = 0;
@@ -102,7 +100,7 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
- nouveau_event_ref(NULL, &nv_connector->hpd);
+ nvif_notify_fini(&nv_connector->hpd);
kfree(nv_connector->edid);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -117,7 +115,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int i, panel = -ENODEV;
@@ -206,7 +204,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
return;
nv_connector->detected_encoder = nv_encoder;
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
} else
@@ -216,9 +214,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = false;
} else {
connector->doublescan_allowed = true;
- if (nv_device(drm->device)->card_type == NV_20 ||
- ((nv_device(drm->device)->card_type == NV_10 ||
- nv_device(drm->device)->card_type == NV_11) &&
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
+ (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
(dev->pdev->device & 0x0ff0) != 0x0100 &&
(dev->pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
@@ -802,11 +799,11 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
if (dcb->location != DCB_LOC_ON_CHIP ||
- nv_device(drm->device)->chipset >= 0x46)
+ drm->device.info.chipset >= 0x46)
return 165000;
- else if (nv_device(drm->device)->chipset >= 0x40)
+ else if (drm->device.info.chipset >= 0x40)
return 155000;
- else if (nv_device(drm->device)->chipset >= 0x18)
+ else if (drm->device.info.chipset >= 0x18)
return 135000;
else
return 112000;
@@ -939,18 +936,19 @@ nouveau_connector_funcs_dp = {
.force = nouveau_connector_force
};
-static void
-nouveau_connector_hotplug_work(struct work_struct *work)
+static int
+nouveau_connector_hotplug(struct nvif_notify *notify)
{
struct nouveau_connector *nv_connector =
- container_of(work, typeof(*nv_connector), work);
+ container_of(notify, typeof(*nv_connector), hpd);
struct drm_connector *connector = &nv_connector->base;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
- if (nv_connector->status & NVKM_HPD_IRQ) {
+ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
} else {
- bool plugged = (nv_connector->status != NVKM_HPD_UNPLUG);
+ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
@@ -961,16 +959,7 @@ nouveau_connector_hotplug_work(struct work_struct *work)
drm_helper_hpd_irq_event(connector->dev);
}
- nouveau_event_get(nv_connector->hpd);
-}
-
-static int
-nouveau_connector_hotplug(void *data, u32 type, int index)
-{
- struct nouveau_connector *nv_connector = data;
- nv_connector->status = type;
- schedule_work(&nv_connector->work);
- return NVKM_EVENT_DROP;
+ return NVIF_NOTIFY_KEEP;
}
static ssize_t
@@ -1040,7 +1029,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct drm_connector *connector;
int type, ret = 0;
bool dummy;
@@ -1194,7 +1182,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
@@ -1226,16 +1214,20 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- ret = nouveau_event_new(pdisp->hpd, NVKM_HPD, index,
- nouveau_connector_hotplug,
- nv_connector, &nv_connector->hpd);
+ ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug,
+ true, NV04_DISP_NTFY_CONN,
+ &(struct nvif_notify_conn_req_v0) {
+ .mask = NVIF_NOTIFY_CONN_V0_ANY,
+ .conn = index,
+ },
+ sizeof(struct nvif_notify_conn_req_v0),
+ sizeof(struct nvif_notify_conn_rep_v0),
+ &nv_connector->hpd);
if (ret)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
else
connector->polled = DRM_CONNECTOR_POLL_HPD;
- INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
-
drm_connector_register(connector);
return connector;
}
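The hotplug path above trades the old eventh-plus-workqueue pair for a single nvif_notify whose handler runs directly and controls re-arming through its return value. A minimal sketch of that callback contract, using the constants from the diff:

	/* Sketch only: the handler reads the versioned report attached to
	 * the notify and returns NVIF_NOTIFY_KEEP to stay armed
	 * (NVIF_NOTIFY_DROP would disarm it).
	 */
	static int hpd_handler(struct nvif_notify *notify)
	{
		const struct nvif_notify_conn_rep_v0 *rep = notify->data;

		if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ)
			return NVIF_NOTIFY_KEEP; /* DP IRQ, not a replug */

		/* plug/unplug: kick the DRM hotplug helpers here */
		return NVIF_NOTIFY_KEEP;
	}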
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 8861b6c579ad..68029d041dd2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -27,14 +27,12 @@
#ifndef __NOUVEAU_CONNECTOR_H__
#define __NOUVEAU_CONNECTOR_H__
+#include <nvif/notify.h>
+
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
-#include <core/event.h>
-
-#include <subdev/bios.h>
-
struct nouveau_i2c_port;
enum nouveau_underscan_type {
@@ -67,9 +65,7 @@ struct nouveau_connector {
u8 index;
u8 *dcb;
- struct nouveau_eventh *hpd;
- u32 status;
- struct work_struct work;
+ struct nvif_notify hpd;
struct drm_dp_aux aux;
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index a0534489d23f..f19cb1c5fc5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -27,10 +27,13 @@
#ifndef __NOUVEAU_CRTC_H__
#define __NOUVEAU_CRTC_H__
+#include <nvif/notify.h>
+
struct nouveau_crtc {
struct drm_crtc base;
int index;
+ struct nvif_notify vblank;
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
@@ -46,7 +49,7 @@ struct nouveau_crtc {
int cpp;
bool blanked;
uint32_t offset;
- uint32_t tile_flags;
+ uint32_t handle;
} fb;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 37a6ab8a97f8..6d0a3cdc752b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -27,6 +27,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <nvif/class.h>
+
#include "nouveau_fbcon.h"
#include "dispnv04/hw.h"
#include "nouveau_crtc.h"
@@ -37,35 +39,42 @@
#include "nouveau_fence.h"
-#include <engine/disp.h>
-
-#include <core/class.h>
+#include <nvif/event.h>
static int
-nouveau_display_vblank_handler(void *data, u32 type, int head)
+nouveau_display_vblank_handler(struct nvif_notify *notify)
{
- struct nouveau_drm *drm = data;
- drm_handle_vblank(drm->dev, head);
- return NVKM_EVENT_KEEP;
+ struct nouveau_crtc *nv_crtc =
+ container_of(notify, typeof(*nv_crtc), vblank);
+ drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+ return NVIF_NOTIFY_KEEP;
}
int
nouveau_display_vblank_enable(struct drm_device *dev, int head)
{
- struct nouveau_display *disp = nouveau_display(dev);
- if (disp) {
- nouveau_event_get(disp->vblank[head]);
- return 0;
+ struct drm_crtc *crtc;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (nv_crtc->index == head) {
+ nvif_notify_get(&nv_crtc->vblank);
+ return 0;
+ }
}
- return -EIO;
+ return -EINVAL;
}
void
nouveau_display_vblank_disable(struct drm_device *dev, int head)
{
- struct nouveau_display *disp = nouveau_display(dev);
- if (disp)
- nouveau_event_put(disp->vblank[head]);
+ struct drm_crtc *crtc;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (nv_crtc->index == head) {
+ nvif_notify_put(&nv_crtc->vblank);
+ return;
+ }
+ }
}
static inline int
@@ -86,17 +95,22 @@ int
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
- const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+ struct {
+ struct nv04_disp_mthd_v0 base;
+ struct nv04_disp_scanoutpos_v0 scan;
+ } args = {
+ .base.method = NV04_DISP_SCANOUTPOS,
+ .base.head = nouveau_crtc(crtc)->index,
+ };
struct nouveau_display *disp = nouveau_display(crtc->dev);
- struct nv04_display_scanoutpos args;
int ret, retry = 1;
do {
- ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+ ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
if (ret != 0)
return 0;
- if (args.vline) {
+ if (args.scan.vline) {
ret |= DRM_SCANOUTPOS_ACCURATE;
ret |= DRM_SCANOUTPOS_VALID;
break;
@@ -105,10 +119,11 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
if (retry) ndelay(crtc->linedur_ns);
} while (retry--);
- *hpos = args.hline;
- *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
- if (stime) *stime = ns_to_ktime(args.time[0]);
- if (etime) *etime = ns_to_ktime(args.time[1]);
+ *hpos = args.scan.hline;
+ *vpos = calc(args.scan.vblanks, args.scan.vblanke,
+ args.scan.vtotal, args.scan.vline);
+ if (stime) *stime = ns_to_ktime(args.scan.time[0]);
+ if (etime) *etime = ns_to_ktime(args.scan.time[1]);
if (*vpos < 0)
ret |= DRM_SCANOUTPOS_IN_VBLANK;
@@ -151,16 +166,13 @@ nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
static void
nouveau_display_vblank_fini(struct drm_device *dev)
{
- struct nouveau_display *disp = nouveau_display(dev);
- int i;
+ struct drm_crtc *crtc;
drm_vblank_cleanup(dev);
- if (disp->vblank) {
- for (i = 0; i < dev->mode_config.num_crtc; i++)
- nouveau_event_ref(NULL, &disp->vblank[i]);
- kfree(disp->vblank);
- disp->vblank = NULL;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nvif_notify_fini(&nv_crtc->vblank);
}
}
@@ -168,19 +180,20 @@ static int
nouveau_display_vblank_init(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
- int ret, i;
-
- disp->vblank = kzalloc(dev->mode_config.num_crtc *
- sizeof(*disp->vblank), GFP_KERNEL);
- if (!disp->vblank)
- return -ENOMEM;
+ struct drm_crtc *crtc;
+ int ret;
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- ret = nouveau_event_new(pdisp->vblank, 1, i,
- nouveau_display_vblank_handler,
- drm, &disp->vblank[i]);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ ret = nvif_notify_init(&disp->disp, NULL,
+ nouveau_display_vblank_handler, false,
+ NV04_DISP_NTFY_VBLANK,
+ &(struct nvif_notify_head_req_v0) {
+ .head = nv_crtc->index,
+ },
+ sizeof(struct nvif_notify_head_req_v0),
+ sizeof(struct nvif_notify_head_rep_v0),
+ &nv_crtc->vblank);
if (ret) {
nouveau_display_vblank_fini(dev);
return ret;
@@ -200,6 +213,10 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct nouveau_display *disp = nouveau_display(drm_fb->dev);
+
+ if (disp->fb_dtor)
+ disp->fb_dtor(drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
@@ -229,63 +246,24 @@ nouveau_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct nouveau_bo *nvbo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_display *disp = nouveau_display(dev);
struct drm_framebuffer *fb = &nv_fb->base;
int ret;
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
nv_fb->nvbo = nvbo;
- if (nv_device(drm->device)->card_type >= NV_50) {
- u32 tile_flags = nouveau_bo_tile_layout(nvbo);
- if (tile_flags == 0x7a00 ||
- tile_flags == 0xfe00)
- nv_fb->r_dma = NvEvoFB32;
- else
- if (tile_flags == 0x7000)
- nv_fb->r_dma = NvEvoFB16;
- else
- nv_fb->r_dma = NvEvoVRAM_LP;
-
- switch (fb->depth) {
- case 8: nv_fb->r_format = 0x1e00; break;
- case 15: nv_fb->r_format = 0xe900; break;
- case 16: nv_fb->r_format = 0xe800; break;
- case 24:
- case 32: nv_fb->r_format = 0xcf00; break;
- case 30: nv_fb->r_format = 0xd100; break;
- default:
- NV_ERROR(drm, "unknown depth %d\n", fb->depth);
- return -EINVAL;
- }
-
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
- NV_ERROR(drm, "framebuffer requires contiguous bo\n");
- return -EINVAL;
- }
-
- if (nv_device(drm->device)->chipset == 0x50)
- nv_fb->r_format |= (tile_flags << 8);
-
- if (!tile_flags) {
- if (nv_device(drm->device)->card_type < NV_D0)
- nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
- else
- nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
- } else {
- u32 mode = nvbo->tile_mode;
- if (nv_device(drm->device)->card_type >= NV_C0)
- mode >>= 4;
- nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
- }
- }
-
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret) {
+ if (ret)
return ret;
+
+ if (disp->fb_ctor) {
+ ret = disp->fb_ctor(fb);
+ if (ret)
+ disp->fb_dtor(fb);
}
- return 0;
+ return ret;
}
static struct drm_framebuffer *
@@ -393,7 +371,7 @@ nouveau_display_init(struct drm_device *dev)
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd) nouveau_event_get(conn->hpd);
+ nvif_notify_get(&conn->hpd);
}
return ret;
@@ -404,37 +382,32 @@ nouveau_display_fini(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
struct drm_connector *connector;
+ int head;
+
+ /* Make sure that drm and hw vblank irqs get properly disabled. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_off(dev, head);
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd) nouveau_event_put(conn->hpd);
+ nvif_notify_put(&conn->hpd);
}
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
-int
-nouveau_display_create(struct drm_device *dev)
+static void
+nouveau_display_create_properties(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_display *disp;
- int ret, gen;
-
- disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
-
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dvi_i_properties(dev);
+ struct nouveau_display *disp = nouveau_display(dev);
+ int gen;
- if (nv_device(drm->device)->card_type < NV_50)
+ if (disp->disp.oclass < NV50_DISP)
gen = 0;
else
- if (nv_device(drm->device)->card_type < NV_D0)
+ if (disp->disp.oclass < GF110_DISP)
gen = 1;
else
gen = 2;
@@ -449,26 +422,43 @@ nouveau_display_create(struct drm_device *dev)
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
- if (gen >= 1) {
- /* -90..+90 */
- disp->vibrant_hue_property =
- drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+ if (gen < 1)
+ return;
- /* -100..+100 */
- disp->color_vibrance_property =
- drm_property_create_range(dev, 0, "color vibrance", 0, 200);
- }
+ /* -90..+90 */
+ disp->vibrant_hue_property =
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+
+ /* -100..+100 */
+ disp->color_vibrance_property =
+ drm_property_create_range(dev, 0, "color vibrance", 0, 200);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_display *disp;
+ int ret;
+
+ disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
- dev->mode_config.fb_base = nv_device_resource_start(device, 1);
+ dev->mode_config.fb_base = nv_device_resource_start(nvkm_device(&drm->device), 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- if (nv_device(drm->device)->card_type < NV_10) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
} else
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -479,7 +469,7 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- if (nv_device(drm->device)->chipset < 0x11)
+ if (drm->device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
else
dev->mode_config.async_page_flip = true;
@@ -487,29 +477,30 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (drm->vbios.dcb.entries) {
+ if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
- GM107_DISP_CLASS,
- NVF0_DISP_CLASS,
- NVE0_DISP_CLASS,
- NVD0_DISP_CLASS,
- NVA3_DISP_CLASS,
- NV94_DISP_CLASS,
- NVA0_DISP_CLASS,
- NV84_DISP_CLASS,
- NV50_DISP_CLASS,
- NV04_DISP_CLASS,
+ GM107_DISP,
+ GK110_DISP,
+ GK104_DISP,
+ GF110_DISP,
+ GT214_DISP,
+ GT206_DISP,
+ GT200_DISP,
+ G82_DISP,
+ NV50_DISP,
+ NV04_DISP,
};
int i;
for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
- NVDRM_DISPLAY, oclass[i],
- NULL, 0, &disp->core);
+ ret = nvif_object_init(nvif_object(&drm->device), NULL,
+ NVDRM_DISPLAY, oclass[i],
+ NULL, 0, &disp->disp);
}
if (ret == 0) {
- if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+ nouveau_display_create_properties(dev);
+ if (disp->disp.oclass < NV50_DISP)
ret = nv04_display_create(dev);
else
ret = nv50_display_create(dev);
@@ -542,7 +533,6 @@ void
nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
@@ -553,7 +543,7 @@ nouveau_display_destroy(struct drm_device *dev)
if (disp->dtor)
disp->dtor(dev);
- nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+ nvif_object_fini(&disp->disp);
nouveau_drm(dev)->display = NULL;
kfree(disp);
@@ -602,7 +592,9 @@ nouveau_display_repin(struct drm_device *dev)
if (!nouveau_fb || !nouveau_fb->nvbo)
continue;
- nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ NV_ERROR(drm, "Could not pin framebuffer\n");
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -620,6 +612,8 @@ void
nouveau_display_resume(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ int head;
+
nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
@@ -629,6 +623,10 @@ nouveau_display_resume(struct drm_device *dev)
nv_crtc->lut.depth = 0;
}
+ /* Make sure that drm and hw vblank irqs get resumed if needed. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_on(dev, head);
+
drm_helper_resume_force_mode(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -660,7 +658,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Synchronize with the old framebuffer */
- ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+ ret = nouveau_fence_sync(old_bo, chan, false);
if (ret)
goto fail;
@@ -669,7 +667,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- if (nv_device(drm->device)->card_type < NV_C0)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
else
BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
@@ -698,12 +696,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan = drm->channel;
+ struct nouveau_channel *chan;
+ struct nouveau_cli *cli;
struct nouveau_fence *fence;
int ret;
- if (!drm->channel)
+ chan = drm->channel;
+ if (!chan)
return -ENODEV;
+ cli = (void *)nvif_client(&chan->device->base);
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -715,20 +716,25 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
goto fail_free;
}
- mutex_lock(&chan->cli->mutex);
-
- /* synchronise rendering channel with the kernel's channel */
- spin_lock(&new_bo->bo.bdev->fence_lock);
- fence = nouveau_fence_ref(new_bo->bo.sync_obj);
- spin_unlock(&new_bo->bo.bdev->fence_lock);
- ret = nouveau_fence_sync(fence, chan);
- nouveau_fence_unref(&fence);
+ mutex_lock(&cli->mutex);
+ ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
if (ret)
goto fail_unpin;
- ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
- if (ret)
+ /* synchronise rendering channel with the kernel's channel */
+ ret = nouveau_fence_sync(new_bo, chan, false);
+ if (ret) {
+ ttm_bo_unreserve(&new_bo->bo);
goto fail_unpin;
+ }
+
+ if (new_bo != old_bo) {
+ ttm_bo_unreserve(&new_bo->bo);
+
+ ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
+ if (ret)
+ goto fail_unpin;
+ }
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
@@ -740,7 +746,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
drm_vblank_get(dev, nouveau_crtc(crtc)->index);
/* Emit a page flip */
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
if (ret)
goto fail_unreserve;
@@ -769,12 +775,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
/* Update the crtc struct and cleanup */
crtc->primary->fb = fb;
- nouveau_bo_fence(old_bo, fence);
+ nouveau_bo_fence(old_bo, fence, false);
ttm_bo_unreserve(&old_bo->bo);
if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
@@ -785,7 +791,7 @@ fail_unreserve:
drm_vblank_put(dev, nouveau_crtc(crtc)->index);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
if (old_bo != new_bo)
nouveau_bo_unpin(new_bo);
fail_free:
@@ -815,7 +821,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
/* Vblank timestamps/counts are only correct on >= NV-50 */
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
crtcid = s->crtc;
drm_send_vblank_event(dev, crtcid, s->event);
@@ -841,7 +847,7 @@ nouveau_flip_complete(void *data)
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
nv_set_crtc_base(drm->dev, state.crtc, state.offset +
state.y * state.pitch +
state.x * state.bpp / 8);
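Several hunks above move display queries onto nvif_mthd() with a composite argument: a versioned method header selects the method and head, and the trailing method-specific payload doubles as the reply buffer. A condensed sketch of the scanout-position call, assuming ret, head, and disp are in scope:

	/* Sketch only: mirrors nouveau_display_scanoutpos_head() above. */
	struct {
		struct nv04_disp_mthd_v0 base;
		struct nv04_disp_scanoutpos_v0 scan;
	} args = {
		.base.method = NV04_DISP_SCANOUTPOS,
		.base.head = head,
	};

	ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
	/* on success, args.scan.{hline,vline,time[]} hold the snapshot */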
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index a71cf77e55b2..88ca177cb1c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -9,9 +9,11 @@ struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
struct nouveau_vma vma;
- u32 r_dma;
+ u32 r_handle;
u32 r_format;
u32 r_pitch;
+ struct nvif_object h_base[4];
+ struct nvif_object h_core;
};
static inline struct nouveau_framebuffer *
@@ -36,8 +38,10 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
- struct nouveau_object *core;
- struct nouveau_eventh **vblank;
+ int (*fb_ctor)(struct drm_framebuffer *);
+ void (*fb_dtor)(struct drm_framebuffer *);
+
+ struct nvif_object disp;
struct drm_property *dithering_mode;
struct drm_property *dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c177272152e2..8508603cc8c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,8 +24,6 @@
*
*/
-#include <core/client.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -54,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
uint64_t val;
- val = nv_ro32(chan->object, chan->user_get);
+ val = nvif_rd32(chan, chan->user_get);
if (chan->user_get_hi)
- val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
+ val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing; this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -84,12 +82,13 @@ void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
int delta, int length)
{
+ struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nouveau_bo *pb = chan->push.buffer;
struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
u64 offset;
- vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
+ vma = nouveau_bo_vma_find(bo, cli->vm);
BUG_ON(!vma);
offset = vma->offset + delta;
@@ -104,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
- nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
+ nvif_wr32(chan, 0x8c, chan->dma.ib_put);
chan->dma.ib_free--;
}
@@ -114,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
uint32_t cnt = 0, prev_get = 0;
while (chan->dma.ib_free < count) {
- uint32_t get = nv_ro32(chan->object, 0x88);
+ uint32_t get = nvif_rd32(chan, 0x88);
if (get != prev_get) {
prev_get = get;
cnt = 0;
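READ_GET's 64-bit composition is easy to miss in the hunk above: on channels that expose a high word, two 32-bit reads are stitched together, and user_get_hi is zero elsewhere so the OR is skipped. Condensed:

	/* Sketch only: the 64-bit GET read from READ_GET() above. */
	uint64_t val = nvif_rd32(chan, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;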
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dc0e0c5cadb4..8da0a272c45a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -58,31 +58,14 @@ enum {
FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
};
-/* Object handles. */
+/* Object handles - for stuff that doesn't use handle == oclass. */
enum {
- NvM2MF = 0x80000001,
NvDmaFB = 0x80000002,
NvDmaTT = 0x80000003,
NvNotify0 = 0x80000006,
- Nv2D = 0x80000007,
- NvCtxSurf2D = 0x80000008,
- NvRop = 0x80000009,
- NvImagePatt = 0x8000000a,
- NvClipRect = 0x8000000b,
- NvGdiRect = 0x8000000c,
- NvImageBlit = 0x8000000d,
- NvSw = 0x8000000e,
NvSema = 0x8000000f,
NvEvoSema0 = 0x80000010,
NvEvoSema1 = 0x80000011,
- NvNotify1 = 0x80000012,
-
- /* G80+ display objects */
- NvEvoVRAM = 0x01000000,
- NvEvoFB16 = 0x01000001,
- NvEvoFB32 = 0x01000002,
- NvEvoVRAM_LP = 0x01000003,
- NvEvoSync = 0xcafe0000
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -157,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
#define WRITE_PUT(val) do { \
mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
- nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+ nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
} while (0)
static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 5675ffc175ae..c5137cccce7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,11 +30,6 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
-#include <core/class.h>
-
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
u8 *dpcd)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5425ffe3931d..cee1eaf64117 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -27,21 +27,14 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
+
#include "drmP.h"
#include "drm_crtc_helper.h"
+
#include <core/device.h>
-#include <core/client.h>
#include <core/gpuobj.h>
-#include <core/class.h>
#include <core/option.h>
-#include <engine/device.h>
-#include <engine/disp.h>
-#include <engine/fifo.h>
-#include <engine/software.h>
-
-#include <subdev/vm.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_ttm.h"
@@ -57,6 +50,7 @@
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
#include "nouveau_debugfs.h"
+#include "nouveau_usif.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -79,7 +73,9 @@ MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1
int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
-static struct drm_driver driver;
+static struct drm_driver driver_stub;
+static struct drm_driver driver_pci;
+static struct drm_driver driver_platform;
static u64
nouveau_pci_name(struct pci_dev *pdev)
@@ -109,40 +105,37 @@ static int
nouveau_cli_create(u64 name, const char *sname,
int size, void **pcli)
{
- struct nouveau_cli *cli;
- int ret;
-
- *pcli = NULL;
- ret = nouveau_client_create_(sname, name, nouveau_config,
- nouveau_debug, size, pcli);
- cli = *pcli;
- if (ret) {
- if (cli)
- nouveau_client_destroy(&cli->base);
- *pcli = NULL;
+ struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
+ if (cli) {
+ int ret = nvif_client_init(NULL, NULL, sname, name,
+ nouveau_config, nouveau_debug,
+ &cli->base);
+ if (ret == 0) {
+ mutex_init(&cli->mutex);
+ usif_client_init(cli);
+ }
return ret;
}
-
- mutex_init(&cli->mutex);
- return 0;
+ return -ENOMEM;
}
static void
nouveau_cli_destroy(struct nouveau_cli *cli)
{
- struct nouveau_object *client = nv_object(cli);
- nouveau_vm_ref(NULL, &cli->base.vm, NULL);
- nouveau_client_fini(&cli->base, false);
- atomic_set(&client->refcount, 1);
- nouveau_object_ref(NULL, &client);
+ nouveau_vm_ref(NULL, &nvkm_client(&cli->base)->vm, NULL);
+ nvif_client_fini(&cli->base);
+ usif_client_fini(cli);
}
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
- nouveau_gpuobj_ref(NULL, &drm->notify);
nouveau_channel_del(&drm->channel);
+ nvif_object_fini(&drm->ntfy);
+ nouveau_gpuobj_ref(NULL, &drm->notify);
+ nvif_object_fini(&drm->nvsw);
nouveau_channel_del(&drm->cechan);
+ nvif_object_fini(&drm->ttm.copy);
if (drm->fence)
nouveau_fence(drm)->dtor(drm);
}
@@ -150,46 +143,71 @@ nouveau_accel_fini(struct nouveau_drm *drm)
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_object *object;
+ struct nvif_device *device = &drm->device;
u32 arg0, arg1;
- int ret;
+ u32 sclass[16];
+ int ret, i;
- if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
+ if (nouveau_noaccel)
return;
/* initialise synchronisation routines */
- if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->card_type < NV_11 ||
- device->chipset < 0x17) ret = nv10_fence_create(drm);
- else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
- else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
- else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
- else ret = nvc0_fence_create(drm);
+ /*XXX: this is crap, but the fence/channel stuff is a little
+ * backwards in some places. this will be fixed.
+ */
+ ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
+ if (ret < 0)
+ return;
+
+ for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
+ switch (sclass[i]) {
+ case NV03_CHANNEL_DMA:
+ ret = nv04_fence_create(drm);
+ break;
+ case NV10_CHANNEL_DMA:
+ ret = nv10_fence_create(drm);
+ break;
+ case NV17_CHANNEL_DMA:
+ case NV40_CHANNEL_DMA:
+ ret = nv17_fence_create(drm);
+ break;
+ case NV50_CHANNEL_GPFIFO:
+ ret = nv50_fence_create(drm);
+ break;
+ case G82_CHANNEL_GPFIFO:
+ ret = nv84_fence_create(drm);
+ break;
+ case FERMI_CHANNEL_GPFIFO:
+ case KEPLER_CHANNEL_GPFIFO_A:
+ ret = nvc0_fence_create(drm);
+ break;
+ default:
+ break;
+ }
+ }
+
if (ret) {
NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
- if (device->card_type >= NV_E0) {
- ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
- NVDRM_CHAN + 1,
- NVE0_CHANNEL_IND_ENGINE_CE0 |
- NVE0_CHANNEL_IND_ENGINE_CE1, 0,
- &drm->cechan);
+ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
+ ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
+ KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
+ KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
+ 0, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
- arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
+ arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
arg1 = 1;
} else
- if (device->chipset >= 0xa3 &&
- device->chipset != 0xaa &&
- device->chipset != 0xac) {
- ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
- NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
- &drm->cechan);
+ if (device->info.chipset >= 0xa3 &&
+ device->info.chipset != 0xaa &&
+ device->info.chipset != 0xac) {
+ ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
+ NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -200,30 +218,30 @@ nouveau_accel_init(struct nouveau_drm *drm)
arg1 = NvDmaTT;
}
- ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
- arg0, arg1, &drm->channel);
+ ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
+ &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
- ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
- nouveau_abi16_swclass(drm), NULL, 0, &object);
+ ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
+ nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
if (ret == 0) {
- struct nouveau_software_chan *swch = (void *)object->parent;
+ struct nouveau_software_chan *swch;
ret = RING_SPACE(drm->channel, 2);
if (ret == 0) {
- if (device->card_type < NV_C0) {
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
OUT_RING (drm->channel, NVDRM_NVSW);
} else
- if (device->card_type < NV_E0) {
+ if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
OUT_RING (drm->channel, 0x001f0000);
}
}
- swch = (void *)object->parent;
+ swch = (void *)nvkm_object(&drm->nvsw)->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = drm->channel;
}
@@ -234,24 +252,24 @@ nouveau_accel_init(struct nouveau_drm *drm)
return;
}
- if (device->card_type < NV_C0) {
- ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
- &drm->notify);
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
+ ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32,
+ 0, 0, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
- ret = nouveau_object_new(nv_object(drm),
- drm->channel->handle, NvNotify0,
- 0x003d, &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = drm->notify->addr,
.limit = drm->notify->addr + 31
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &drm->ntfy);
if (ret) {
nouveau_accel_fini(drm);
return;
@@ -294,7 +312,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
#ifdef CONFIG_X86
boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+ if (nouveau_modeset != 2)
+ remove_conflicting_framebuffers(aper, "nouveaufb", boot);
kfree(aper);
ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI,
@@ -305,7 +324,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = drm_get_pci_dev(pdev, pent, &driver);
+ ret = drm_get_pci_dev(pdev, pent, &driver_pci);
if (ret) {
nouveau_object_ref(NULL, (struct nouveau_object **)&device);
return ret;
@@ -348,7 +367,6 @@ static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = dev->pdev;
- struct nouveau_device *device;
struct nouveau_drm *drm;
int ret;
@@ -359,7 +377,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
- nouveau_client(drm)->debug = nouveau_dbgopt(nouveau_debug, "DRM");
+ nvkm_client(&drm->client.base)->debug =
+ nouveau_dbgopt(nouveau_debug, "DRM");
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
@@ -370,33 +389,34 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
* (possibly) execute vbios init tables (see nouveau_agp.h)
*/
if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
+ const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
+ NV_DEVICE_V0_DISABLE_MMIO;
/* dummy device object, doesn't init anything, but allows
* agp code access to registers
*/
- ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT,
- NVDRM_DEVICE, 0x0080,
- &(struct nv_device_class) {
+ ret = nvif_device_init(&drm->client.base.base, NULL,
+ NVDRM_DEVICE, NV_DEVICE,
+ &(struct nv_device_v0) {
.device = ~0,
- .disable =
- ~(NV_DEVICE_DISABLE_MMIO |
- NV_DEVICE_DISABLE_IDENTIFY),
+ .disable = ~enables,
.debug0 = ~0,
- }, sizeof(struct nv_device_class),
- &drm->device);
+ }, sizeof(struct nv_device_v0),
+ &drm->device);
if (ret)
goto fail_device;
nouveau_agp_reset(drm);
- nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
+ nvif_device_fini(&drm->device);
}
- ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE,
- 0x0080, &(struct nv_device_class) {
+ ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
+ NV_DEVICE,
+ &(struct nv_device_v0) {
.device = ~0,
.disable = 0,
.debug0 = 0,
- }, sizeof(struct nv_device_class),
- &drm->device);
+ }, sizeof(struct nv_device_v0),
+ &drm->device);
if (ret)
goto fail_device;
@@ -406,18 +426,19 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
*/
- device = nv_device(drm->device);
- if (nv_device(drm->device)->chipset == 0xc1)
- nv_mask(device, 0x00088080, 0x00000800, 0x00000000);
+ if (drm->device.info.chipset == 0xc1)
+ nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000);
nouveau_vga_init(drm);
nouveau_agp_init(drm);
- if (device->card_type >= NV_50) {
- ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
- 0x1000, &drm->client.base.vm);
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
+ 0x1000, &drm->client.vm);
if (ret)
goto fail_device;
+
+ nvkm_client(&drm->client.base)->vm = drm->client.vm;
}
ret = nouveau_ttm_init(drm);
@@ -463,6 +484,7 @@ fail_ttm:
nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
fail_device:
+ nvif_device_fini(&drm->device);
nouveau_cli_destroy(&drm->client);
return ret;
}
@@ -488,26 +510,37 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
+ nvif_device_fini(&drm->device);
if (drm->hdmi_device)
pci_dev_put(drm->hdmi_device);
nouveau_cli_destroy(&drm->client);
return 0;
}
-static void
-nouveau_drm_remove(struct pci_dev *pdev)
+void
+nouveau_drm_device_remove(struct drm_device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_client *client;
struct nouveau_object *device;
dev->irq_enabled = false;
- device = drm->client.base.device;
+ client = nvkm_client(&drm->client.base);
+ device = client->device;
drm_put_dev(dev);
nouveau_object_ref(NULL, &device);
nouveau_object_debug();
}
+EXPORT_SYMBOL(nouveau_drm_device_remove);
+
+static void
+nouveau_drm_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ nouveau_drm_device_remove(dev);
+}
static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
@@ -548,13 +581,13 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
list_for_each_entry(cli, &drm->clients, head) {
- ret = nouveau_client_fini(&cli->base, true);
+ ret = nvif_client_suspend(&cli->base);
if (ret)
goto fail_client;
}
NV_INFO(drm, "suspending kernel object tree...\n");
- ret = nouveau_client_fini(&drm->client.base, true);
+ ret = nvif_client_suspend(&drm->client.base);
if (ret)
goto fail_client;
@@ -563,7 +596,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
fail_client:
list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
- nouveau_client_init(&cli->base);
+ nvif_client_resume(&cli->base);
}
if (drm->fence && nouveau_fence(drm)->resume)
@@ -611,7 +644,7 @@ nouveau_do_resume(struct drm_device *dev)
nouveau_agp_reset(drm);
NV_INFO(drm, "resuming kernel object tree...\n");
- nouveau_client_init(&drm->client.base);
+ nvif_client_resume(&drm->client.base);
nouveau_agp_init(drm);
NV_INFO(drm, "resuming client object trees...\n");
@@ -619,7 +652,7 @@ nouveau_do_resume(struct drm_device *dev)
nouveau_fence(drm)->resume(drm);
list_for_each_entry(cli, &drm->clients, head) {
- nouveau_client_init(&cli->base);
+ nvif_client_resume(&cli->base);
}
nouveau_run_vbios_init(dev);
@@ -715,13 +748,17 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
if (ret)
goto out_suspend;
- if (nv_device(drm->device)->card_type >= NV_50) {
- ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
- 0x1000, &cli->base.vm);
+ cli->base.super = false;
+
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
+ 0x1000, &cli->vm);
if (ret) {
nouveau_cli_destroy(cli);
goto out_suspend;
}
+
+ nvkm_client(&cli->base)->vm = cli->vm;
}
fpriv->driver_priv = cli;
@@ -779,24 +816,31 @@ nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
-long nouveau_drm_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
+long
+nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev;
+ struct drm_file *filp = file->private_data;
+ struct drm_device *dev = filp->minor->dev;
long ret;
- dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
- ret = drm_ioctl(filp, cmd, arg);
+ switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
+ case DRM_NOUVEAU_NVIF:
+ ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
+ break;
+ default:
+ ret = drm_ioctl(file, cmd, arg);
+ break;
+ }
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
+
static const struct file_operations
nouveau_driver_fops = {
.owner = THIS_MODULE,
@@ -813,7 +857,7 @@ nouveau_driver_fops = {
};
static struct drm_driver
-driver = {
+driver_stub = {
.driver_features =
DRIVER_USE_AGP |
DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
@@ -845,6 +889,7 @@ driver = {
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = nouveau_gem_prime_pin,
+ .gem_prime_res_obj = nouveau_gem_prime_res_obj,
.gem_prime_unpin = nouveau_gem_prime_unpin,
.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
@@ -920,7 +965,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_device *device = nouveau_dev(drm_dev);
+ struct nvif_device *device = &nouveau_drm(drm_dev)->device;
int ret;
if (nouveau_runtime_pm == 0)
@@ -936,7 +981,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
- nv_mask(device, 0x88488, (1 << 25), (1 << 25));
+ nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
nv_debug_level(NORMAL);
@@ -1004,28 +1049,50 @@ nouveau_drm_pci_driver = {
.driver.pm = &nouveau_pm_ops,
};
-int nouveau_drm_platform_probe(struct platform_device *pdev)
+struct drm_device *
+nouveau_platform_device_create_(struct platform_device *pdev, int size,
+ void **pobject)
{
- struct nouveau_device *device;
- int ret;
+ struct drm_device *drm;
+ int err;
- ret = nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM,
+ err = nouveau_device_create_(pdev, NOUVEAU_BUS_PLATFORM,
nouveau_platform_name(pdev),
dev_name(&pdev->dev), nouveau_config,
- nouveau_debug, &device);
-
- ret = drm_platform_init(&driver, pdev);
- if (ret) {
- nouveau_object_ref(NULL, (struct nouveau_object **)&device);
- return ret;
+ nouveau_debug, size, pobject);
+ if (err)
+ return ERR_PTR(err);
+
+ drm = drm_dev_alloc(&driver_platform, &pdev->dev);
+ if (!drm) {
+ err = -ENOMEM;
+ goto err_free;
}
- return ret;
+ err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));
+ if (err < 0)
+ goto err_free;
+
+ drm->platformdev = pdev;
+ platform_set_drvdata(pdev, drm);
+
+ return drm;
+
+err_free:
+ nouveau_object_ref(NULL, (struct nouveau_object **)pobject);
+
+ return ERR_PTR(err);
}
+EXPORT_SYMBOL(nouveau_platform_device_create_);
static int __init
nouveau_drm_init(void)
{
+ driver_pci = driver_stub;
+ driver_pci.set_busid = drm_pci_set_busid;
+ driver_platform = driver_stub;
+ driver_platform.set_busid = drm_platform_set_busid;
+
if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
@@ -1037,7 +1104,7 @@ nouveau_drm_init(void)
return 0;
nouveau_register_dsm_handler();
- return drm_pci_init(&driver, &nouveau_drm_pci_driver);
+ return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
}
static void __exit
@@ -1046,7 +1113,7 @@ nouveau_drm_exit(void)
if (!nouveau_modeset)
return;
- drm_pci_exit(&driver, &nouveau_drm_pci_driver);
+ drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
nouveau_unregister_dsm_handler();
}
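The driver split at module init is a plain struct copy: the shared ops live in driver_stub, and each bus flavour clones it and patches in its own set_busid hook before registering. A sketch of just that step, with the unrelated init work omitted:

	/* Sketch only: the per-bus clone done in nouveau_drm_init(). */
	driver_pci = driver_stub;
	driver_pci.set_busid = drm_pci_set_busid;

	driver_platform = driver_stub;
	driver_platform.set_busid = drm_platform_set_busid;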
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 7efbafaf7c1d..b02b02452c85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 1
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 2
+#define DRIVER_PATCHLEVEL 0
/*
* 1.1.1:
@@ -21,15 +21,17 @@
* to control registers on the MPs to enable performance counters,
* and to control the warp error enable mask (OpenGL requires out of
* bounds access to local memory to be silently ignored / return 0).
+ * 1.1.2:
+ * - fixes multiple bugs in flip completion events and timestamping
+ * 1.2.0:
+ * - object api exposed to userspace
+ * - fermi,kepler,maxwell zbc
*/
-#include <core/client.h>
-#include <core/event.h>
-
-#include <subdev/vm.h>
+#include <nvif/client.h>
+#include <nvif/device.h>
#include <drmP.h>
-#include <drm/nouveau_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -38,7 +40,10 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
+#include "uapi/drm/nouveau_drm.h"
+
struct nouveau_channel;
+struct platform_device;
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
@@ -50,6 +55,17 @@ struct nouveau_drm_tile {
bool used;
};
+enum nouveau_drm_object_route {
+ NVDRM_OBJECT_NVIF = 0,
+ NVDRM_OBJECT_USIF,
+ NVDRM_OBJECT_ABI16,
+};
+
+enum nouveau_drm_notify_route {
+ NVDRM_NOTIFY_NVIF = 0,
+ NVDRM_NOTIFY_USIF
+};
+
enum nouveau_drm_handle {
NVDRM_CLIENT = 0xffffffff,
NVDRM_DEVICE = 0xdddddddd,
@@ -61,10 +77,13 @@ enum nouveau_drm_handle {
};
struct nouveau_cli {
- struct nouveau_client base;
+ struct nvif_client base;
+ struct nouveau_vm *vm; /*XXX*/
struct list_head head;
struct mutex mutex;
void *abi16;
+ struct list_head objects;
+ struct list_head notifys;
};
static inline struct nouveau_cli *
@@ -73,13 +92,16 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL;
}
+#include <nvif/object.h>
+#include <nvif/device.h>
+
extern int nouveau_runtime_pm;
struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
- struct nouveau_object *device;
+ struct nvif_device device;
struct list_head clients;
struct {
@@ -102,6 +124,7 @@ struct nouveau_drm {
struct ttm_buffer_object *,
struct ttm_mem_reg *, struct ttm_mem_reg *);
struct nouveau_channel *chan;
+ struct nvif_object copy;
int mtrr;
} ttm;
@@ -119,6 +142,8 @@ struct nouveau_drm {
struct nouveau_channel *channel;
struct nouveau_gpuobj *notify;
struct nouveau_fbdev *fbcon;
+ struct nvif_object nvsw;
+ struct nvif_object ntfy;
/* nv10-nv40 tiling regions */
struct {
@@ -148,20 +173,25 @@ nouveau_drm(struct drm_device *dev)
return dev->dev_private;
}
-static inline struct nouveau_device *
-nouveau_dev(struct drm_device *dev)
-{
- return nv_device(nouveau_drm(dev)->device);
-}
-
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
-#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
-#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
-#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
-#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
-#define NV_DEBUG(cli, fmt, args...) nv_debug((cli), fmt, ##args)
+#define nouveau_platform_device_create(p, u) \
+ nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
+struct drm_device *
+nouveau_platform_device_create_(struct platform_device *pdev,
+ int size, void **pobject);
+void nouveau_drm_device_remove(struct drm_device *dev);
+
+#define NV_PRINTK(l,c,f,a...) do { \
+ struct nouveau_cli *_cli = (c); \
+ nv_##l(_cli->base.base.priv, f, ##a); \
+} while(0)
+#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a)
+#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
+#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
+#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a)
extern int nouveau_modeset;
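
The NV_PRINTK macro above token-pastes its level argument into the matching nv_<level>() backend while binding a nouveau_cli, and the NV_* wrappers then fix the client to the drm instance. A minimal standalone sketch of the same token-pasting pattern, assuming hypothetical my_error()/my_warn() backends in place of the kernel's nv_* calls:

#include <stdio.h>

struct cli { const char *name; };

/* hypothetical per-level backends standing in for nv_error()/nv_warn() */
#define my_error(c, f, ...) fprintf(stderr, "[E:%s] " f, (c)->name, ##__VA_ARGS__)
#define my_warn(c, f, ...)  fprintf(stderr, "[W:%s] " f, (c)->name, ##__VA_ARGS__)

/* token-paste the level, mirroring NV_PRINTK(l,c,f,a...) */
#define MY_PRINTK(l, c, f, a...) do {        \
	struct cli *_cli = (c);              \
	my_##l(_cli, f, ##a);                \
} while (0)

int main(void)
{
	struct cli cli = { .name = "nouveau" };
	MY_PRINTK(error, &cli, "bad page flags: 0x%08x\n", 0xdeadbeefu);
	MY_PRINTK(warn, &cli, "falling back to software rendering\n");
	return 0;
}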
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 758c11cb9a9a..8bdd27091db8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -51,11 +51,6 @@
#include "nouveau_crtc.h"
-#include <core/client.h>
-#include <core/device.h>
-
-#include <subdev/fb.h>
-
MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
static int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
@@ -65,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -74,10 +69,10 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&drm->client.mutex)) {
- if (device->card_type < NV_50)
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
ret = nv04_fbcon_fillrect(info, rect);
else
- if (device->card_type < NV_C0)
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
ret = nv50_fbcon_fillrect(info, rect);
else
ret = nvc0_fbcon_fillrect(info, rect);
@@ -97,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -106,10 +101,10 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&drm->client.mutex)) {
- if (device->card_type < NV_50)
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
ret = nv04_fbcon_copyarea(info, image);
else
- if (device->card_type < NV_C0)
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
ret = nv50_fbcon_copyarea(info, image);
else
ret = nvc0_fbcon_copyarea(info, image);
@@ -129,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -138,10 +133,10 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&drm->client.mutex)) {
- if (device->card_type < NV_50)
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
ret = nv04_fbcon_imageblit(info, image);
else
- if (device->card_type < NV_C0)
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
ret = nv50_fbcon_imageblit(info, image);
else
ret = nvc0_fbcon_imageblit(info, image);
@@ -212,6 +207,65 @@ static struct fb_ops nouveau_fbcon_sw_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
+void
+nouveau_fbcon_accel_save_disable(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon) {
+ drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+ drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+}
+
+void
+nouveau_fbcon_accel_restore(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon) {
+ drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+ }
+}
+
+static void
+nouveau_fbcon_accel_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ if (fbcon && drm->channel) {
+ console_lock();
+ fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ console_unlock();
+ nouveau_channel_idle(drm->channel);
+ nvif_object_fini(&fbcon->twod);
+ nvif_object_fini(&fbcon->blit);
+ nvif_object_fini(&fbcon->gdi);
+ nvif_object_fini(&fbcon->patt);
+ nvif_object_fini(&fbcon->rop);
+ nvif_object_fini(&fbcon->clip);
+ nvif_object_fini(&fbcon->surf2d);
+ }
+}
+
+static void
+nouveau_fbcon_accel_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ struct fb_info *info = fbcon->helper.fbdev;
+ int ret;
+
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
+ ret = nv04_fbcon_accel_init(info);
+ else
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ ret = nv50_fbcon_accel_init(info);
+ else
+ ret = nvc0_fbcon_accel_init(info);
+
+ if (ret == 0)
+ info->fbops = &nouveau_fbcon_ops;
+}
+
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
@@ -257,7 +311,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
struct drm_device *dev = fbcon->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct fb_info *info;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
@@ -299,8 +353,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}
chan = nouveau_nofbaccel ? NULL : drm->channel;
- if (chan && device->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
+ if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_vma_add(nvbo, drm->client.vm,
&fbcon->nouveau_fb.vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
@@ -357,20 +411,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mutex_unlock(&dev->struct_mutex);
- if (chan) {
- ret = -ENODEV;
- if (device->card_type < NV_50)
- ret = nv04_fbcon_accel_init(info);
- else
- if (device->card_type < NV_C0)
- ret = nv50_fbcon_accel_init(info);
- else
- ret = nvc0_fbcon_accel_init(info);
-
- if (ret == 0)
- info->fbops = &nouveau_fbcon_ops;
- }
-
+ if (chan)
+ nouveau_fbcon_accel_init(dev);
nouveau_fbcon_zfill(dev, fbcon);
/* To allow resizeing without swapping buffers */
@@ -449,7 +491,6 @@ int
nouveau_fbcon_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct nouveau_fbdev *fbcon;
int preferred_bpp;
int ret;
@@ -476,10 +517,10 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_single_add_all_connectors(&fbcon->helper);
- if (pfb->ram->size <= 32 * 1024 * 1024)
+ if (drm->device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
else
- if (pfb->ram->size <= 64 * 1024 * 1024)
+ if (drm->device.info.ram_size <= 64 * 1024 * 1024)
preferred_bpp = 16;
else
preferred_bpp = 32;
@@ -499,43 +540,25 @@ nouveau_fbcon_fini(struct drm_device *dev)
if (!drm->fbcon)
return;
+ nouveau_fbcon_accel_fini(dev);
nouveau_fbcon_destroy(dev, drm->fbcon);
kfree(drm->fbcon);
drm->fbcon = NULL;
}
void
-nouveau_fbcon_save_disable_accel(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
- drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
- }
-}
-
-void
-nouveau_fbcon_restore_accel(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
- }
-}
-
-void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->fbcon) {
console_lock();
- if (state == 1)
- nouveau_fbcon_save_disable_accel(dev);
- fb_set_suspend(drm->fbcon->helper.fbdev, state);
if (state == 0) {
- nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_accel_restore(dev);
nouveau_fbcon_zfill(dev, drm->fbcon);
}
+ fb_set_suspend(drm->fbcon->helper.fbdev, state);
+ if (state == 1)
+ nouveau_fbcon_accel_save_disable(dev);
console_unlock();
}
}
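
The renamed nouveau_fbcon_accel_save_disable()/nouveau_fbcon_accel_restore() pair implements a save-then-force-off dance on FBINFO_HWACCEL_DISABLED, and the set_suspend hunk above reorders things so the restore happens before fb_set_suspend() on resume (state == 0) while the save/disable happens after it on suspend (state == 1). A standalone sketch of the flag dance, with simplified stand-ins for the fb_info type and flag bit:

#include <stdio.h>

#define HWACCEL_DISABLED 0x1   /* stand-in for FBINFO_HWACCEL_DISABLED */

struct fbdev_sketch { unsigned flags, saved_flags; };

static void accel_save_disable(struct fbdev_sketch *fb)
{
	fb->saved_flags = fb->flags;      /* remember pre-suspend state */
	fb->flags |= HWACCEL_DISABLED;    /* force software rendering */
}

static void accel_restore(struct fbdev_sketch *fb)
{
	fb->flags = fb->saved_flags;      /* undo, whatever it was before */
}

int main(void)
{
	struct fbdev_sketch fb = { .flags = 0 };
	accel_save_disable(&fb);
	printf("suspended: flags=%#x\n", fb.flags);
	accel_restore(&fb);
	printf("resumed:   flags=%#x\n", fb.flags);
	return 0;
}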
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fcff797d2084..34658cfa8f5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -37,6 +37,13 @@ struct nouveau_fbdev {
struct list_head fbdev_list;
struct drm_device *dev;
unsigned int saved_flags;
+ struct nvif_object surf2d;
+ struct nvif_object clip;
+ struct nvif_object rop;
+ struct nvif_object patt;
+ struct nvif_object gdi;
+ struct nvif_object blit;
+ struct nvif_object twod;
};
void nouveau_fbcon_restore(void);
@@ -61,8 +68,8 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
void nouveau_fbcon_fini(struct drm_device *dev);
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
-void nouveau_fbcon_restore_accel(struct drm_device *dev);
+void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
+void nouveau_fbcon_accel_restore(struct drm_device *dev);
void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab5ea3b0d666..decfe6c4ac07 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,130 +28,219 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
+#include <trace/events/fence.h>
+
+#include <nvif/notify.h>
+#include <nvif/event.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
-#include <engine/fifo.h>
+static const struct fence_ops nouveau_fence_ops_uevent;
+static const struct fence_ops nouveau_fence_ops_legacy;
-struct fence_work {
- struct work_struct base;
- struct list_head head;
- void (*func)(void *);
- void *data;
-};
+static inline struct nouveau_fence *
+from_fence(struct fence *fence)
+{
+ return container_of(fence, struct nouveau_fence, base);
+}
+
+static inline struct nouveau_fence_chan *
+nouveau_fctx(struct nouveau_fence *fence)
+{
+ return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
+}
static void
nouveau_fence_signal(struct nouveau_fence *fence)
{
- struct fence_work *work, *temp;
+ fence_signal_locked(&fence->base);
+ list_del(&fence->head);
+
+ if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- list_for_each_entry_safe(work, temp, &fence->work, head) {
- schedule_work(&work->base);
- list_del(&work->head);
+ if (!--fctx->notify_ref)
+ nvif_notify_put(&fctx->notify);
}
- fence->channel = NULL;
- list_del(&fence->head);
+ fence_put(&fence->base);
+}
+
+static struct nouveau_fence *
+nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm)
+{
+ struct nouveau_fence_priv *priv = (void*)drm->fence;
+
+ if (fence->ops != &nouveau_fence_ops_legacy &&
+ fence->ops != &nouveau_fence_ops_uevent)
+ return NULL;
+
+ if (fence->context < priv->context_base ||
+ fence->context >= priv->context_base + priv->contexts)
+ return NULL;
+
+ return from_fence(fence);
}
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence, *fnext;
- spin_lock(&fctx->lock);
- list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ struct nouveau_fence *fence;
+
+ nvif_notify_fini(&fctx->notify);
+
+ spin_lock_irq(&fctx->lock);
+ while (!list_empty(&fctx->pending)) {
+ fence = list_entry(fctx->pending.next, typeof(*fence), head);
+
nouveau_fence_signal(fence);
+ fence->channel = NULL;
}
- spin_unlock(&fctx->lock);
+ spin_unlock_irq(&fctx->lock);
+}
+
+static void
+nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
+{
+ struct nouveau_fence *fence;
+
+ u32 seq = fctx->read(chan);
+
+ while (!list_empty(&fctx->pending)) {
+ fence = list_entry(fctx->pending.next, typeof(*fence), head);
+
+ if ((int)(seq - fence->base.seqno) < 0)
+ return;
+
+ nouveau_fence_signal(fence);
+ }
+}
+
+static int
+nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
+{
+ struct nouveau_fence_chan *fctx =
+ container_of(notify, typeof(*fctx), notify);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ if (!list_empty(&fctx->pending)) {
+ struct nouveau_fence *fence;
+
+ fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ nouveau_fence_update(fence->channel, fctx);
+ }
+ spin_unlock_irqrestore(&fctx->lock, flags);
+
+	/* Always return NVIF_NOTIFY_KEEP here; the notify refcount is managed via nouveau_fence_update(). */
+ return NVIF_NOTIFY_KEEP;
}
void
-nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
+nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
+ struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
+ int ret;
+
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
+ fctx->context = priv->context_base + chan->chid;
+
+ if (!priv->uevent)
+ return;
+
+ ret = nvif_notify_init(chan->object, NULL,
+ nouveau_fence_wait_uevent_handler, false,
+ G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+ &(struct nvif_notify_uevent_req) { },
+ sizeof(struct nvif_notify_uevent_req),
+ sizeof(struct nvif_notify_uevent_rep),
+ &fctx->notify);
+
+ WARN_ON(ret);
}
+struct nouveau_fence_work {
+ struct work_struct work;
+ struct fence_cb cb;
+ void (*func)(void *);
+ void *data;
+};
+
static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
- struct fence_work *work = container_of(kwork, typeof(*work), base);
+ struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
work->func(work->data);
kfree(work);
}
+static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+{
+ struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
+
+ schedule_work(&work->work);
+}
+
void
-nouveau_fence_work(struct nouveau_fence *fence,
+nouveau_fence_work(struct fence *fence,
void (*func)(void *), void *data)
{
- struct nouveau_channel *chan = fence->channel;
- struct nouveau_fence_chan *fctx;
- struct fence_work *work = NULL;
+ struct nouveau_fence_work *work;
- if (nouveau_fence_done(fence)) {
- func(data);
- return;
- }
+ if (fence_is_signaled(fence))
+ goto err;
- fctx = chan->fence;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
- WARN_ON(nouveau_fence_wait(fence, false, false));
- func(data);
- return;
+ WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
+ false, false));
+ goto err;
}
- spin_lock(&fctx->lock);
- if (!fence->channel) {
- spin_unlock(&fctx->lock);
- kfree(work);
- func(data);
- return;
- }
-
- INIT_WORK(&work->base, nouveau_fence_work_handler);
+ INIT_WORK(&work->work, nouveau_fence_work_handler);
work->func = func;
work->data = data;
- list_add(&work->head, &fence->work);
- spin_unlock(&fctx->lock);
-}
-static void
-nouveau_fence_update(struct nouveau_channel *chan)
-{
- struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_fence *fence, *fnext;
+ if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+ goto err_free;
+ return;
- spin_lock(&fctx->lock);
- list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
- if (fctx->read(chan) < fence->sequence)
- break;
-
- nouveau_fence_signal(fence);
- nouveau_fence_unref(&fence);
- }
- spin_unlock(&fctx->lock);
+err_free:
+ kfree(work);
+err:
+ func(data);
}
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
struct nouveau_fence_chan *fctx = chan->fence;
+ struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
- fence->sequence = ++fctx->sequence;
+ if (priv->uevent)
+ fence_init(&fence->base, &nouveau_fence_ops_uevent,
+ &fctx->lock,
+ priv->context_base + chan->chid, ++fctx->sequence);
+ else
+ fence_init(&fence->base, &nouveau_fence_ops_legacy,
+ &fctx->lock,
+ priv->context_base + chan->chid, ++fctx->sequence);
+
+ trace_fence_emit(&fence->base);
ret = fctx->emit(fence);
if (!ret) {
- kref_get(&fence->kref);
- spin_lock(&fctx->lock);
+ fence_get(&fence->base);
+ spin_lock_irq(&fctx->lock);
+ nouveau_fence_update(chan, fctx);
list_add_tail(&fence->head, &fctx->pending);
- spin_unlock(&fctx->lock);
+ spin_unlock_irq(&fctx->lock);
}
return ret;
@@ -160,104 +249,70 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
- if (fence->channel)
- nouveau_fence_update(fence->channel);
- return !fence->channel;
-}
+ if (fence->base.ops == &nouveau_fence_ops_legacy ||
+ fence->base.ops == &nouveau_fence_ops_uevent) {
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ unsigned long flags;
-static int
-nouveau_fence_wait_uevent_handler(void *data, u32 type, int index)
-{
- struct nouveau_fence_priv *priv = data;
- wake_up_all(&priv->waiting);
- return NVKM_EVENT_KEEP;
-}
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ return true;
-static int
-nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
+ spin_lock_irqsave(&fctx->lock, flags);
+ nouveau_fence_update(fence->channel, fctx);
+ spin_unlock_irqrestore(&fctx->lock, flags);
+ }
+ return fence_is_signaled(&fence->base);
+}
+static long
+nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
- struct nouveau_channel *chan = fence->channel;
- struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
- struct nouveau_fence_priv *priv = chan->drm->fence;
- struct nouveau_eventh *handler;
- int ret = 0;
+ struct nouveau_fence *fence = from_fence(f);
+ unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+ unsigned long t = jiffies, timeout = t + wait;
- ret = nouveau_event_new(pfifo->uevent, 1, 0,
- nouveau_fence_wait_uevent_handler,
- priv, &handler);
- if (ret)
- return ret;
+ while (!nouveau_fence_done(fence)) {
+ ktime_t kt;
- nouveau_event_get(handler);
-
- if (fence->timeout) {
- unsigned long timeout = fence->timeout - jiffies;
-
- if (time_before(jiffies, fence->timeout)) {
- if (intr) {
- ret = wait_event_interruptible_timeout(
- priv->waiting,
- nouveau_fence_done(fence),
- timeout);
- } else {
- ret = wait_event_timeout(priv->waiting,
- nouveau_fence_done(fence),
- timeout);
- }
- }
+ t = jiffies;
- if (ret >= 0) {
- fence->timeout = jiffies + ret;
- if (time_after_eq(jiffies, fence->timeout))
- ret = -EBUSY;
- }
- } else {
- if (intr) {
- ret = wait_event_interruptible(priv->waiting,
- nouveau_fence_done(fence));
- } else {
- wait_event(priv->waiting, nouveau_fence_done(fence));
+ if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
}
+
+ __set_current_state(intr ? TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+
+ kt = ktime_set(0, sleep_time);
+ schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
+ sleep_time *= 2;
+ if (sleep_time > NSEC_PER_MSEC)
+ sleep_time = NSEC_PER_MSEC;
+
+ if (intr && signal_pending(current))
+ return -ERESTARTSYS;
}
- nouveau_event_ref(NULL, &handler);
- if (unlikely(ret < 0))
- return ret;
+ __set_current_state(TASK_RUNNING);
- return 0;
+ return timeout - t;
}
-int
-nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
+static int
+nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
- struct nouveau_channel *chan = fence->channel;
- struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
- unsigned long sleep_time = NSEC_PER_MSEC / 1000;
- ktime_t t;
int ret = 0;
- while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
- ret = nouveau_fence_wait_uevent(fence, intr);
- if (ret < 0)
- return ret;
- }
-
while (!nouveau_fence_done(fence)) {
- if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
+ if (time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
- __set_current_state(intr ? TASK_INTERRUPTIBLE :
- TASK_UNINTERRUPTIBLE);
- if (lazy) {
- t = ktime_set(0, sleep_time);
- schedule_hrtimeout(&t, HRTIMER_MODE_REL);
- sleep_time *= 2;
- if (sleep_time > NSEC_PER_MSEC)
- sleep_time = NSEC_PER_MSEC;
- }
+ __set_current_state(intr ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -270,47 +325,86 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
}
int
-nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
- struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_channel *prev;
- int ret = 0;
+ long ret;
- prev = fence ? fence->channel : NULL;
- if (prev) {
- if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
- ret = fctx->sync(fence, prev, chan);
- if (unlikely(ret))
- ret = nouveau_fence_wait(fence, true, false);
- }
- }
+ if (!lazy)
+ return nouveau_fence_wait_busy(fence, intr);
- return ret;
+ ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+ else
+ return 0;
}
-static void
-nouveau_fence_del(struct kref *kref)
+int
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive)
{
- struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
- kfree(fence);
+ struct nouveau_fence_chan *fctx = chan->fence;
+ struct fence *fence;
+ struct reservation_object *resv = nvbo->bo.resv;
+ struct reservation_object_list *fobj;
+ struct nouveau_fence *f;
+ int ret = 0, i;
+
+ if (!exclusive) {
+ ret = reservation_object_reserve_shared(resv);
+
+ if (ret)
+ return ret;
+ }
+
+ fobj = reservation_object_get_list(resv);
+ fence = reservation_object_get_excl(resv);
+
+ if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
+ struct nouveau_channel *prev = NULL;
+
+ f = nouveau_local_fence(fence, chan->drm);
+ if (f)
+ prev = f->channel;
+
+ if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+ ret = fence_wait(fence, true);
+
+ return ret;
+ }
+
+ if (!exclusive || !fobj)
+ return ret;
+
+ for (i = 0; i < fobj->shared_count && !ret; ++i) {
+ struct nouveau_channel *prev = NULL;
+
+ fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(resv));
+
+ f = nouveau_local_fence(fence, chan->drm);
+ if (f)
+ prev = f->channel;
+
+ if (!prev || (ret = fctx->sync(f, prev, chan)))
+ ret = fence_wait(fence, true);
+
+ if (ret)
+ break;
+ }
+
+ return ret;
}
void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
- kref_put(&(*pfence)->kref, nouveau_fence_del);
+ fence_put(&(*pfence)->base);
*pfence = NULL;
}
-struct nouveau_fence *
-nouveau_fence_ref(struct nouveau_fence *fence)
-{
- if (fence)
- kref_get(&fence->kref);
- return fence;
-}
-
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
struct nouveau_fence **pfence)
@@ -325,9 +419,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
if (!fence)
return -ENOMEM;
- INIT_LIST_HEAD(&fence->work);
fence->sysmem = sysmem;
- kref_init(&fence->kref);
ret = nouveau_fence_emit(fence, chan);
if (ret)
@@ -336,3 +428,92 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
*pfence = fence;
return ret;
}
+
+static const char *nouveau_fence_get_driver_name(struct fence *fence)
+{
+ return "nouveau";
+}
+
+static const char *nouveau_fence_get_timeline_name(struct fence *f)
+{
+ struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+
+ return fence->channel ? fctx->name : "dead channel";
+}
+
+/*
+ * Ideally, read would not assume the channel context is still alive.
+ * Because this function may be called from another device, it can race
+ * with channel teardown and dereference freed memory as a result. The
+ * drm node should still be there, so we can derive the index from the
+ * fence context.
+ */
+static bool nouveau_fence_is_signaled(struct fence *f)
+{
+ struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ struct nouveau_channel *chan = fence->channel;
+
+ return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+}
+
+static bool nouveau_fence_no_signaling(struct fence *f)
+{
+ struct nouveau_fence *fence = from_fence(f);
+
+ /*
+ * caller should have a reference on the fence,
+ * else fence could get freed here
+ */
+ WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
+
+ /*
+ * This needs uevents to work correctly, but fence_add_callback relies on
+ * being able to enable signaling. It will still get signaled eventually,
+ * just not right away.
+ */
+ if (nouveau_fence_is_signaled(f)) {
+ list_del(&fence->head);
+
+ fence_put(&fence->base);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct fence_ops nouveau_fence_ops_legacy = {
+	.get_driver_name = nouveau_fence_get_driver_name,
+ .get_timeline_name = nouveau_fence_get_timeline_name,
+ .enable_signaling = nouveau_fence_no_signaling,
+ .signaled = nouveau_fence_is_signaled,
+ .wait = nouveau_fence_wait_legacy,
+ .release = NULL
+};
+
+static bool nouveau_fence_enable_signaling(struct fence *f)
+{
+ struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ bool ret;
+
+ if (!fctx->notify_ref++)
+ nvif_notify_get(&fctx->notify);
+
+ ret = nouveau_fence_no_signaling(f);
+ if (ret)
+ set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+ else if (!--fctx->notify_ref)
+ nvif_notify_put(&fctx->notify);
+
+ return ret;
+}
+
+static const struct fence_ops nouveau_fence_ops_uevent = {
+	.get_driver_name = nouveau_fence_get_driver_name,
+ .get_timeline_name = nouveau_fence_get_timeline_name,
+ .enable_signaling = nouveau_fence_enable_signaling,
+ .signaled = nouveau_fence_is_signaled,
+ .wait = fence_default_wait,
+ .release = NULL
+};
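
The pending-list walk in nouveau_fence_update() decides completion with (int)(seq - fence->base.seqno) < 0, a signed-difference comparison that stays correct when the u32 sequence counter wraps. A standalone demonstration of why the cast matters:

#include <stdio.h>
#include <stdint.h>

/* wraparound-safe "has the hardware counter reached this seqno?" test */
static int seqno_done(uint32_t hw_seq, uint32_t fence_seq)
{
	return (int32_t)(hw_seq - fence_seq) >= 0;
}

int main(void)
{
	/* near wraparound: the hw counter has passed 0xffffffff back to 2 */
	printf("%d\n", seqno_done(2u, 0xfffffffeu));  /* 1: fence signaled */
	printf("%d\n", seqno_done(0xfffffffeu, 2u));  /* 0: still pending  */
	return 0;
}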
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c57bb61da58c..986c8135e564 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,33 +1,35 @@
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
+#include <linux/fence.h>
+#include <nvif/notify.h>
+
struct nouveau_drm;
+struct nouveau_bo;
struct nouveau_fence {
+ struct fence base;
+
struct list_head head;
- struct list_head work;
- struct kref kref;
bool sysmem;
struct nouveau_channel *channel;
unsigned long timeout;
- u32 sequence;
};
int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
struct nouveau_fence **);
-struct nouveau_fence *
-nouveau_fence_ref(struct nouveau_fence *);
void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
-int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive);
struct nouveau_fence_chan {
+ spinlock_t lock;
struct list_head pending;
struct list_head flip;
@@ -38,8 +40,12 @@ struct nouveau_fence_chan {
int (*emit32)(struct nouveau_channel *, u64, u32);
int (*sync32)(struct nouveau_channel *, u64, u32);
- spinlock_t lock;
u32 sequence;
+ u32 context;
+ char name[24];
+
+ struct nvif_notify notify;
+ int notify_ref;
};
struct nouveau_fence_priv {
@@ -49,13 +55,13 @@ struct nouveau_fence_priv {
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- wait_queue_head_t waiting;
+ u32 contexts, context_base;
bool uevent;
};
#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
-void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
void nouveau_fence_context_del(struct nouveau_fence_chan *);
int nv04_fence_create(struct nouveau_drm *);
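
struct nouveau_fence now embeds the generic struct fence as its first member, and from_fence() in nouveau_fence.c recovers the wrapper with container_of(). A standalone sketch of the embedding pattern, using simplified stand-in types rather than the kernel definitions:

#include <stdio.h>
#include <stddef.h>

struct fence { unsigned seqno; };            /* stand-in for the generic type */

struct nouveau_fence_sketch {
	struct fence base;                   /* embedded, as in the header above */
	const char *channel;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* mirrors from_fence() in nouveau_fence.c */
static struct nouveau_fence_sketch *from_fence(struct fence *f)
{
	return container_of(f, struct nouveau_fence_sketch, base);
}

int main(void)
{
	struct nouveau_fence_sketch nf = { .base = { .seqno = 7 }, .channel = "chan0" };
	struct fence *f = &nf.base;          /* what generic fence code sees */
	printf("%s seqno=%u\n", from_fence(f)->channel, f->seqno);
	return 0;
}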
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index df9d451afdcd..b7dbd16904e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
*
*/
-#include <subdev/fb.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -58,14 +56,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_vma *vma;
int ret;
- if (!cli->base.vm)
+ if (!cli->vm)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (!vma) {
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma) {
@@ -73,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
- ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
+ ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
if (ret) {
kfree(vma);
goto out;
@@ -100,17 +98,23 @@ static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
- struct nouveau_fence *fence = NULL;
+ struct reservation_object *resv = nvbo->bo.resv;
+ struct reservation_object_list *fobj;
+ struct fence *fence = NULL;
+
+ fobj = reservation_object_get_list(resv);
list_del(&vma->head);
- if (mapped) {
- spin_lock(&nvbo->bo.bdev->fence_lock);
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
- }
+ if (fobj && fobj->shared_count > 1)
+ ttm_bo_wait(&nvbo->bo, true, false, false);
+ else if (fobj && fobj->shared_count == 1)
+ fence = rcu_dereference_protected(fobj->shared[0],
+ reservation_object_held(resv));
+ else
+ fence = reservation_object_get_excl(nvbo->bo.resv);
- if (fence) {
+ if (fence && mapped) {
nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
} else {
if (mapped)
@@ -118,7 +122,6 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
nouveau_vm_put(vma);
kfree(vma);
}
- nouveau_fence_unref(&fence);
}
void
@@ -129,14 +132,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_vma *vma;
int ret;
- if (!cli->base.vm)
+ if (!cli->vm)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return;
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
if (--vma->refcount == 0)
nouveau_gem_object_unmap(nvbo, vma);
@@ -173,7 +176,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
/* Initialize the embedded gem-object. We return a single gem-reference
@@ -202,8 +205,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
- if (cli->base.vm) {
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ if (cli->vm) {
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (!vma)
return -EINVAL;
@@ -223,13 +226,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
- NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -290,24 +293,23 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
}
struct validate_op {
- struct list_head vram_list;
- struct list_head gart_list;
- struct list_head both_list;
+ struct list_head list;
struct ww_acquire_ctx ticket;
};
static void
-validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
- struct ww_acquire_ctx *ticket)
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- struct list_head *entry, *tmp;
struct nouveau_bo *nvbo;
+ struct drm_nouveau_gem_pushbuf_bo *b;
- list_for_each_safe(entry, tmp, list) {
- nvbo = list_entry(entry, struct nouveau_bo, entry);
+ while (!list_empty(&op->list)) {
+ nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
+ b = &pbbo[nvbo->pbbo_index];
if (likely(fence))
- nouveau_bo_fence(nvbo, fence);
+ nouveau_bo_fence(nvbo, fence, !!b->write_domains);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
@@ -316,23 +318,16 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
- ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
+ ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
drm_gem_object_unreference_unlocked(&nvbo->gem);
}
}
static void
-validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
+validate_fini(struct validate_op *op, struct nouveau_fence *fence,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- validate_fini_list(&op->vram_list, fence, &op->ticket);
- validate_fini_list(&op->gart_list, fence, &op->ticket);
- validate_fini_list(&op->both_list, fence, &op->ticket);
-}
-
-static void
-validate_fini(struct validate_op *op, struct nouveau_fence *fence)
-{
- validate_fini_no_ticket(op, fence);
+ validate_fini_no_ticket(op, fence, pbbo);
ww_acquire_fini(&op->ticket);
}
@@ -346,11 +341,14 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
int trycnt = 0;
int ret, i;
struct nouveau_bo *res_bo = NULL;
+ LIST_HEAD(gart_list);
+ LIST_HEAD(vram_list);
+ LIST_HEAD(both_list);
ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
if (++trycnt > 100000) {
- NV_ERROR(cli, "%s failed and gave up.\n", __func__);
+ NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -361,10 +359,9 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
- ww_acquire_done(&op->ticket);
- validate_fini(op, NULL);
- return -ENOENT;
+ NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
+ ret = -ENOENT;
+ break;
}
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
@@ -374,17 +371,19 @@ retry:
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_ERROR(cli, "multiple instances of buffer %d on "
+ NV_PRINTK(error, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
- ww_acquire_done(&op->ticket);
- validate_fini(op, NULL);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
if (ret) {
- validate_fini_no_ticket(op, NULL);
+ list_splice_tail_init(&vram_list, &op->list);
+ list_splice_tail_init(&gart_list, &op->list);
+ list_splice_tail_init(&both_list, &op->list);
+ validate_fini_no_ticket(op, NULL, NULL);
if (unlikely(ret == -EDEADLK)) {
ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
&op->ticket);
@@ -392,12 +391,9 @@ retry:
res_bo = nvbo;
}
if (unlikely(ret)) {
- ww_acquire_done(&op->ticket);
- ww_acquire_fini(&op->ticket);
- drm_gem_object_unreference_unlocked(gem);
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "fail reserve\n");
- return ret;
+ NV_PRINTK(error, cli, "fail reserve\n");
+ break;
}
}
@@ -406,45 +402,32 @@ retry:
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
(b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
- list_add_tail(&nvbo->entry, &op->both_list);
+ list_add_tail(&nvbo->entry, &both_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
- list_add_tail(&nvbo->entry, &op->vram_list);
+ list_add_tail(&nvbo->entry, &vram_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
- list_add_tail(&nvbo->entry, &op->gart_list);
+ list_add_tail(&nvbo->entry, &gart_list);
else {
- NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
+ NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
- list_add_tail(&nvbo->entry, &op->both_list);
- ww_acquire_done(&op->ticket);
- validate_fini(op, NULL);
- return -EINVAL;
+ list_add_tail(&nvbo->entry, &both_list);
+ ret = -EINVAL;
+ break;
}
if (nvbo == res_bo)
goto retry;
}
ww_acquire_done(&op->ticket);
- return 0;
-}
-
-static int
-validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
-{
- struct nouveau_fence *fence = NULL;
- int ret = 0;
-
- spin_lock(&nvbo->bo.bdev->fence_lock);
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
-
- if (fence) {
- ret = nouveau_fence_sync(fence, chan);
- nouveau_fence_unref(&fence);
- }
-
+ list_splice_tail(&vram_list, &op->list);
+ list_splice_tail(&gart_list, &op->list);
+ list_splice_tail(&both_list, &op->list);
+ if (ret)
+ validate_fini(op, NULL, NULL);
return ret;
}
static int
@@ -465,24 +448,25 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_ERROR(cli, "fail set_domain\n");
+ NV_PRINTK(error, cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "fail ttm_validate\n");
+ NV_PRINTK(error, cli, "fail ttm_validate\n");
return ret;
}
- ret = validate_sync(chan, nvbo);
+ ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains);
if (unlikely(ret)) {
- NV_ERROR(cli, "fail post-validate sync\n");
+ if (ret != -ERESTARTSYS)
+ NV_PRINTK(error, cli, "fail post-validate sync\n");
return ret;
}
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -515,11 +499,9 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct validate_op *op, int *apply_relocs)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
- int ret, relocs = 0;
+ int ret;
- INIT_LIST_HEAD(&op->vram_list);
- INIT_LIST_HEAD(&op->gart_list);
- INIT_LIST_HEAD(&op->both_list);
+ INIT_LIST_HEAD(&op->list);
if (nr_buffers == 0)
return 0;
@@ -527,38 +509,18 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate_init\n");
+ NV_PRINTK(error, cli, "validate_init\n");
return ret;
}
- ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate vram_list\n");
- validate_fini(op, NULL);
+ NV_PRINTK(error, cli, "validating bo list\n");
+ validate_fini(op, NULL, NULL);
return ret;
}
- relocs += ret;
-
- ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
- if (unlikely(ret < 0)) {
- if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate gart_list\n");
- validate_fini(op, NULL);
- return ret;
- }
- relocs += ret;
-
- ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
- if (unlikely(ret < 0)) {
- if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate both_list\n");
- validate_fini(op, NULL);
- return ret;
- }
- relocs += ret;
-
- *apply_relocs = relocs;
+ *apply_relocs = ret;
return 0;
}
@@ -613,7 +575,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_ERROR(cli, "reloc bo index invalid\n");
+ NV_PRINTK(error, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -623,7 +585,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_ERROR(cli, "reloc container bo index invalid\n");
+ NV_PRINTK(error, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -631,7 +593,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_ERROR(cli, "reloc outside of bo\n");
+ NV_PRINTK(error, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -640,7 +602,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_ERROR(cli, "failed kmap for reloc\n");
+ NV_PRINTK(error, cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -661,11 +623,9 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
data |= r->vor;
}
- spin_lock(&nvbo->bo.bdev->fence_lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
+ ret = ttm_bo_wait(&nvbo->bo, true, false, false);
if (ret) {
- NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
+ NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -696,7 +656,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return -ENOMEM;
list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
+ if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
chan = temp->chan;
break;
}
@@ -711,19 +671,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
@@ -741,7 +701,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(cli, "push %d buffer not in list\n", i);
+ NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -752,7 +712,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate: %d\n", ret);
+ NV_PRINTK(error, cli, "validate: %d\n", ret);
goto out_prevalid;
}
@@ -760,7 +720,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (do_reloc) {
ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
- NV_ERROR(cli, "reloc apply: %d\n", ret);
+ NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -768,7 +728,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_ERROR(cli, "nv50cal_space: %d\n", ret);
+ NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -780,10 +740,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (nv_device(drm->device)->chipset >= 0x25) {
+ if (drm->device.info.chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_ERROR(cli, "cal_space: %d\n", ret);
+ NV_PRINTK(error, cli, "cal_space: %d\n", ret);
goto out;
}
@@ -797,7 +757,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_ERROR(cli, "jmp_space: %d\n", ret);
+ NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
goto out;
}
@@ -835,13 +795,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
- NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
+ NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
out:
- validate_fini(&op, fence);
+ validate_fini(&op, fence, bo);
nouveau_fence_unref(&fence);
out_prevalid:
@@ -853,7 +813,7 @@ out_next:
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (nv_device(drm->device)->chipset >= 0x25) {
+ if (drm->device.info.chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
@@ -886,17 +846,29 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
- int ret = -EINVAL;
+ bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+ int ret;
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- spin_lock(&nvbo->bo.bdev->fence_lock);
- ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
+ if (no_wait)
+ ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
+ else {
+ long lret;
+
+ lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
+ if (!lret)
+ ret = -EBUSY;
+ else if (lret > 0)
+ ret = 0;
+ else
+ ret = lret;
+ }
drm_gem_object_unreference_unlocked(gem);
+
return ret;
}
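
The new cpu_prep path collapses the long returned by reservation_object_wait_timeout_rcu() (0 on timeout, remaining jiffies on success, negative on error) into an ioctl-style int. A standalone sketch of that mapping, using -EINTR in place of the kernel's -ERESTARTSYS so it builds in userspace:

#include <stdio.h>
#include <errno.h>

/* 0 -> timed out, <0 -> error passthrough, >0 -> signaled in time */
static int map_wait_result(long lret)
{
	if (lret == 0)
		return -EBUSY;       /* object still busy after the timeout */
	if (lret < 0)
		return (int)lret;    /* e.g. -ERESTARTSYS in the kernel */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", map_wait_result(0),
	       map_wait_result(-EINTR), map_wait_result(150));
	return 0;
}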
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 7caca057bc38..ddab762d81fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,6 +35,7 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 19fd767bab10..afb36d66e78d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -34,17 +34,13 @@
#include "nouveau_drm.h"
#include "nouveau_hwmon.h"
-#include <subdev/gpio.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
static ssize_t
nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int temp = therm->temp_get(therm);
if (temp < 0)
@@ -70,7 +66,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -82,7 +78,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -103,7 +99,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -115,7 +111,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -135,7 +131,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
@@ -146,7 +142,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -166,7 +162,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
@@ -177,7 +173,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -198,7 +194,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
@@ -210,7 +206,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -231,7 +227,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
@@ -244,7 +240,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -264,7 +260,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
@@ -276,7 +272,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -297,7 +293,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
@@ -310,7 +306,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -350,7 +346,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
}
@@ -363,7 +359,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
@@ -379,7 +375,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
int ret;
@@ -402,7 +398,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->fan_get(therm);
@@ -418,7 +414,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret = -ENODEV;
long value;
@@ -442,7 +438,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
@@ -458,7 +454,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
int ret;
@@ -482,7 +478,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
@@ -498,7 +494,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
int ret;
@@ -565,7 +561,7 @@ nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
int ret = 0;
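
One wart the conversion leaves in place: the store handlers above treat only kstrtol() == -EINVAL as failure, so an out-of-range input (kstrtol() returning -ERANGE) falls through and uses an indeterminate value. A hedged sketch of the more defensive shape, in kernel context with the attribute wiring elided:

static ssize_t
store_sketch(struct device *d, struct device_attribute *a,
	     const char *buf, size_t count)
{
	long value;
	int ret = kstrtol(buf, 10, &value);

	if (ret)                 /* rejects -EINVAL and -ERANGE alike */
		return ret;

	/* ... validate and apply value, e.g. via therm->attr_set() ... */
	return count;
}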
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
new file mode 100644
index 000000000000..47ca88623753
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+/*******************************************************************************
+ * NVIF client driver - NVKM directly linked
+ ******************************************************************************/
+
+#include <core/client.h>
+#include <core/notify.h>
+#include <core/ioctl.h>
+
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/notify.h>
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_usif.h"
+
+static void
+nvkm_client_unmap(void *priv, void *ptr, u32 size)
+{
+ iounmap(ptr);
+}
+
+static void *
+nvkm_client_map(void *priv, u64 handle, u32 size)
+{
+ return ioremap(handle, size);
+}
+
+static int
+nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+{
+ return nvkm_ioctl(priv, super, data, size, hack);
+}
+
+static int
+nvkm_client_resume(void *priv)
+{
+ return nouveau_client_init(priv);
+}
+
+static int
+nvkm_client_suspend(void *priv)
+{
+ return nouveau_client_fini(priv, true);
+}
+
+static void
+nvkm_client_fini(void *priv)
+{
+ struct nouveau_object *client = priv;
+ nouveau_client_fini(nv_client(client), false);
+ atomic_set(&client->refcount, 1);
+ nouveau_object_ref(NULL, &client);
+}
+
+static int
+nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size)
+{
+ const union {
+ struct nvif_notify_req_v0 v0;
+ } *args = header;
+ u8 route;
+
+ if (length == sizeof(args->v0) && args->v0.version == 0) {
+ route = args->v0.route;
+ } else {
+ WARN_ON(1);
+ return NVKM_NOTIFY_DROP;
+ }
+
+ switch (route) {
+ case NVDRM_NOTIFY_NVIF:
+ return nvif_notify(header, length, data, size);
+ case NVDRM_NOTIFY_USIF:
+ return usif_notify(header, length, data, size);
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return NVKM_NOTIFY_DROP;
+}
+
+static int
+nvkm_client_init(const char *name, u64 device, const char *cfg,
+ const char *dbg, void **ppriv)
+{
+ struct nouveau_client *client;
+ int ret;
+
+ ret = nouveau_client_create(name, device, cfg, dbg, &client);
+ *ppriv = client;
+ if (ret)
+ return ret;
+
+ client->ntfy = nvkm_client_ntfy;
+ return 0;
+}
+
+const struct nvif_driver
+nvif_driver_nvkm = {
+ .name = "nvkm",
+ .init = nvkm_client_init,
+ .fini = nvkm_client_fini,
+ .suspend = nvkm_client_suspend,
+ .resume = nvkm_client_resume,
+ .ioctl = nvkm_client_ioctl,
+ .map = nvkm_client_map,
+ .unmap = nvkm_client_unmap,
+ .keep = false,
+};
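
Note: nvif_driver_nvkm above is a plain function table; nvif clients stay backend-agnostic and dispatch through whichever driver they were bound to at init. A self-contained sketch of the pattern with toy types (struct toy_driver is an invented stand-in, not the kernel's struct nvif_driver):

	#include <stdio.h>

	struct toy_driver {
		const char *name;
		int (*suspend)(void *priv);
		int (*resume)(void *priv);
	};

	struct toy_client {
		const struct toy_driver *driver;
		void *priv;
	};

	static int toy_nvkm_suspend(void *priv) { (void)priv; return 0; }
	static int toy_nvkm_resume(void *priv)  { (void)priv; return 0; }

	static const struct toy_driver toy_driver_nvkm = {
		.name    = "nvkm",
		.suspend = toy_nvkm_suspend,
		.resume  = toy_nvkm_resume,
	};

	int main(void)
	{
		struct toy_client client = { .driver = &toy_driver_nvkm };

		/* callers never see the backend, only the table */
		if (client.driver->suspend(client.priv) == 0)
			printf("%s: suspended\n", client.driver->name);
		return client.driver->resume(client.priv);
	}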
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
new file mode 100644
index 000000000000..246a824c16ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+#include <soc/tegra/pmc.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_platform.h"
+
+static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
+{
+ int err;
+
+ err = regulator_enable(gpu->vdd);
+ if (err)
+ goto err_power;
+
+ err = clk_prepare_enable(gpu->clk);
+ if (err)
+ goto err_clk;
+ err = clk_prepare_enable(gpu->clk_pwr);
+ if (err)
+ goto err_clk_pwr;
+ clk_set_rate(gpu->clk_pwr, 204000000);
+ udelay(10);
+
+ reset_control_assert(gpu->rst);
+ udelay(10);
+
+ err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
+ if (err)
+ goto err_clamp;
+ udelay(10);
+
+ reset_control_deassert(gpu->rst);
+ udelay(10);
+
+ return 0;
+
+err_clamp:
+ clk_disable_unprepare(gpu->clk_pwr);
+err_clk_pwr:
+ clk_disable_unprepare(gpu->clk);
+err_clk:
+ regulator_disable(gpu->vdd);
+err_power:
+ return err;
+}
+
+static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
+{
+ int err;
+
+ reset_control_assert(gpu->rst);
+ udelay(10);
+
+ clk_disable_unprepare(gpu->clk_pwr);
+ clk_disable_unprepare(gpu->clk);
+ udelay(10);
+
+ err = regulator_disable(gpu->vdd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int nouveau_platform_probe(struct platform_device *pdev)
+{
+ struct nouveau_platform_gpu *gpu;
+ struct nouveau_platform_device *device;
+ struct drm_device *drm;
+ int err;
+
+ gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
+ if (!gpu)
+ return -ENOMEM;
+
+ gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(gpu->vdd))
+ return PTR_ERR(gpu->vdd);
+
+ gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
+ if (IS_ERR(gpu->rst))
+ return PTR_ERR(gpu->rst);
+
+ gpu->clk = devm_clk_get(&pdev->dev, "gpu");
+ if (IS_ERR(gpu->clk))
+ return PTR_ERR(gpu->clk);
+
+ gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+ if (IS_ERR(gpu->clk_pwr))
+ return PTR_ERR(gpu->clk_pwr);
+
+ err = nouveau_platform_power_up(gpu);
+ if (err)
+ return err;
+
+ drm = nouveau_platform_device_create(pdev, &device);
+ if (IS_ERR(drm)) {
+ err = PTR_ERR(drm);
+ goto power_down;
+ }
+
+ device->gpu = gpu;
+
+ err = drm_dev_register(drm, 0);
+ if (err < 0)
+ goto err_unref;
+
+ return 0;
+
+err_unref:
+ drm_dev_unref(drm);
+
+ return err;
+
+power_down:
+ nouveau_platform_power_down(gpu);
+
+ return err;
+}
+
+static int nouveau_platform_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm_dev = platform_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
+ struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
+
+ nouveau_drm_device_remove(drm_dev);
+
+ return nouveau_platform_power_down(gpu);
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id nouveau_platform_match[] = {
+ { .compatible = "nvidia,gk20a" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, nouveau_platform_match);
+#endif
+
+struct platform_driver nouveau_platform_driver = {
+ .driver = {
+ .name = "nouveau",
+ .of_match_table = of_match_ptr(nouveau_platform_match),
+ },
+ .probe = nouveau_platform_probe,
+ .remove = nouveau_platform_remove,
+};
+
+module_platform_driver(nouveau_platform_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
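
Note: nouveau_platform_power_up() above follows the usual kernel goto-unwind ladder — on failure, each label undoes exactly the steps that had already succeeded, in reverse order. A compilable sketch of the idiom, with placeholder step functions standing in for the regulator/clock/powergate calls:

	static int step_a(void) { return 0; }	/* e.g. regulator_enable() */
	static int step_b(void) { return 0; }	/* e.g. clk_prepare_enable() */
	static int step_c(void) { return -1; }	/* e.g. unclamp; fails here */
	static void undo_a(void) { }
	static void undo_b(void) { }

	static int power_up_sketch(void)
	{
		int err;

		err = step_a();
		if (err)
			goto err_a;
		err = step_b();
		if (err)
			goto err_b;
		err = step_c();
		if (err)
			goto err_c;
		return 0;

	err_c:		/* step_c failed: unwind b, then fall through to a */
		undo_b();
	err_b:
		undo_a();
	err_a:
		return err;
	}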
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
new file mode 100644
index 000000000000..91f66504900e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_PLATFORM_H__
+#define __NOUVEAU_PLATFORM_H__
+
+#include "core/device.h"
+
+struct reset_control;
+struct clk;
+struct regulator;
+
+struct nouveau_platform_gpu {
+ struct reset_control *rst;
+ struct clk *clk;
+ struct clk *clk_pwr;
+
+ struct regulator *vdd;
+};
+
+struct nouveau_platform_device {
+ struct nouveau_device device;
+
+ struct nouveau_platform_gpu *gpu;
+};
+
+#define nv_device_to_platform(d) \
+ container_of(d, struct nouveau_platform_device, device)
+
+#endif
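
Note: nv_device_to_platform() above is the standard container_of() construction — given a pointer to the embedded nouveau_device, it recovers the enclosing nouveau_platform_device. A self-contained userspace demonstration with toy structs:

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct toy_device { int id; };

	struct toy_platform_device {
		struct toy_device device;	/* must be embedded for this to work */
		int gpio;
	};

	#define toy_device_to_platform(d) \
		container_of(d, struct toy_platform_device, device)

	int main(void)
	{
		struct toy_platform_device pdev = { .device = { .id = 7 }, .gpio = 42 };
		struct toy_device *dev = &pdev.device;

		/* recover the wrapper from a pointer to the embedded member */
		printf("gpio = %d\n", toy_device_to_platform(dev)->gpio);
		return 0;
	}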
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 51a2cb102b44..1f51008e4d26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -102,3 +102,10 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
nouveau_bo_unpin(nvbo);
}
+
+struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+
+ return nvbo->bo.resv;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a4d22e5eb176..01707e7deaf5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,8 +1,6 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
-#include <subdev/fb.h>
-
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
@@ -104,7 +102,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
return NULL;
nvbe->dev = drm->dev;
- if (nv_device(drm->device)->card_type < NV_50)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
nvbe->ttm.ttm.func = &nv04_sgdma_backend;
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 75dda2b07176..3c6962d15b26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -22,10 +22,15 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include <nvif/os.h>
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
+
#include "nouveau_sysfs.h"
-#include <core/object.h>
-#include <core/class.h>
+MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future");
+static int nouveau_pstate;
+module_param_named(pstate, nouveau_pstate, int, 0400);
static inline struct drm_device *
drm_device(struct device *d)
@@ -43,38 +48,42 @@ static ssize_t
nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
{
struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
- struct nv_control_pstate_info info;
+ struct nvif_control_pstate_info_v0 info = {};
size_t cnt = PAGE_SIZE;
char *buf = b;
int ret, i;
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));
+ ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_INFO,
+ &info, sizeof(info));
if (ret)
return ret;
for (i = 0; i < info.count + 1; i++) {
const s32 state = i < info.count ? i :
- NV_CONTROL_PSTATE_ATTR_STATE_CURRENT;
- struct nv_control_pstate_attr attr = {
+ NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
+ struct nvif_control_pstate_attr_v0 attr = {
.state = state,
.index = 0,
};
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
- &attr, sizeof(attr));
+ ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR,
+ &attr, sizeof(attr));
if (ret)
return ret;
if (i < info.count)
snappendf(buf, cnt, "%02x:", attr.state);
else
- snappendf(buf, cnt, "--:");
+ snappendf(buf, cnt, "%s:", info.pwrsrc == 0 ? "DC" :
+ info.pwrsrc == 1 ? "AC" :
+ "--");
attr.index = 0;
do {
attr.state = state;
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
- &attr, sizeof(attr));
+ ret = nvif_mthd(&sysfs->ctrl,
+ NVIF_CONTROL_PSTATE_ATTR,
+ &attr, sizeof(attr));
if (ret)
return ret;
@@ -84,9 +93,20 @@ nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
snappendf(buf, cnt, " %s", attr.unit);
} while (attr.index);
- if ((state >= 0 && info.pstate == state) ||
- (state < 0 && info.ustate < 0))
- snappendf(buf, cnt, " *");
+ if (state >= 0) {
+ if (info.ustate_ac == state)
+ snappendf(buf, cnt, " AC");
+ if (info.ustate_dc == state)
+ snappendf(buf, cnt, " DC");
+ if (info.pstate == state)
+ snappendf(buf, cnt, " *");
+ } else {
+ if (info.ustate_ac < -1)
+ snappendf(buf, cnt, " AC");
+ if (info.ustate_dc < -1)
+ snappendf(buf, cnt, " DC");
+ }
+
snappendf(buf, cnt, "\n");
}
@@ -98,26 +118,36 @@ nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
const char *buf, size_t count)
{
struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
- struct nv_control_pstate_user args;
+ struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
long value, ret;
char *tmp;
if ((tmp = strchr(buf, '\n')))
*tmp = '\0';
+ if (!strncasecmp(buf, "dc:", 3)) {
+ args.pwrsrc = 0;
+ buf += 3;
+ } else
+ if (!strncasecmp(buf, "ac:", 3)) {
+ args.pwrsrc = 1;
+ buf += 3;
+ }
+
if (!strcasecmp(buf, "none"))
- args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN;
+ args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
else
if (!strcasecmp(buf, "auto"))
- args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON;
+ args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
else {
ret = kstrtol(buf, 16, &value);
if (ret)
return ret;
- args.state = value;
+ args.ustate = value;
}
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
+ ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_USER,
+ &args, sizeof(args));
if (ret < 0)
return ret;
@@ -132,11 +162,11 @@ nouveau_sysfs_fini(struct drm_device *dev)
{
struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
- if (sysfs->ctrl) {
- device_remove_file(nv_device_base(device), &dev_attr_pstate);
- nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
+ if (sysfs && sysfs->ctrl.priv) {
+ device_remove_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
+ nvif_object_fini(&sysfs->ctrl);
}
drm->sysfs = NULL;
@@ -147,18 +177,22 @@ int
nouveau_sysfs_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nouveau_sysfs *sysfs;
int ret;
+ if (!nouveau_pstate)
+ return 0;
+
sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
if (!sysfs)
return -ENOMEM;
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
- NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
+ ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
+ NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
+ &sysfs->ctrl);
if (ret == 0)
- device_create_file(nv_device_base(device), &dev_attr_pstate);
+ device_create_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
return 0;
}
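
Note: the reworked nouveau_sysfs_pstate_set() above parses an optional "ac:" or "dc:" prefix, then "none", "auto", or a hex performance level. A userspace sketch that mirrors just the parse order (the sentinel values are stand-ins for the NVIF constants, and no ioctl plumbing is attempted):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <strings.h>	/* strncasecmp */

	/* returns 0 on success; *pwrsrc: -1 = unspecified, 0 = DC, 1 = AC */
	static int parse_pstate(const char *buf, int *pwrsrc, long *ustate)
	{
		*pwrsrc = -1;
		if (!strncasecmp(buf, "dc:", 3)) { *pwrsrc = 0; buf += 3; }
		else if (!strncasecmp(buf, "ac:", 3)) { *pwrsrc = 1; buf += 3; }

		if (!strcasecmp(buf, "none"))
			*ustate = -1;	/* stand-in for STATE_UNKNOWN */
		else if (!strcasecmp(buf, "auto"))
			*ustate = -2;	/* stand-in for STATE_PERFMON */
		else {
			char *end;
			*ustate = strtol(buf, &end, 16);	/* hex level, as in the patch */
			if (end == buf || *end != '\0')
				return -1;
		}
		return 0;
	}

	int main(void)
	{
		int src; long state;
		if (parse_pstate("ac:0f", &src, &state) == 0)
			printf("pwrsrc=%d ustate=%ld\n", src, state);	/* pwrsrc=1 ustate=15 */
		return 0;
	}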
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
index 74b47f1e01ed..f973378160f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -4,7 +4,7 @@
#include "nouveau_drm.h"
struct nouveau_sysfs {
- struct nouveau_object *ctrl;
+ struct nvif_object ctrl;
};
static inline struct nouveau_sysfs *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7e185c122750..e81d086577ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,10 +24,6 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/instmem.h>
-
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
@@ -36,7 +32,7 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
man->priv = pfb;
return 0;
}
@@ -67,7 +63,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
nouveau_mem_node_cleanup(mem->mm_node);
pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
}
@@ -75,12 +71,11 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- uint32_t flags,
+ const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_mem *node;
u32 size_nc = 0;
@@ -162,8 +157,7 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- uint32_t flags,
+ const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -176,14 +170,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
- switch (nv_device(drm->device)->card_type) {
- case NV_50:
- if (nv_device(drm->device)->chipset != 0x50)
+ switch (drm->device.info.family) {
+ case NV_DEVICE_INFO_V0_TESLA:
+ if (drm->device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
- case NV_C0:
- case NV_D0:
- case NV_E0:
+ case NV_DEVICE_INFO_V0_FERMI:
+ case NV_DEVICE_INFO_V0_KEPLER:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
@@ -208,12 +201,13 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
nouveau_gart_manager_debug
};
+/*XXX*/
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
+ struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
struct nv04_vmmgr_priv *priv = (void *)vmm;
struct nouveau_vm *vm = NULL;
nouveau_vm_ref(priv->vm, &vm, NULL);
@@ -243,8 +237,7 @@ nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- uint32_t flags,
+ const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct nouveau_mem *node;
@@ -357,12 +350,11 @@ int
nouveau_ttm_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
- struct nouveau_device *device = nv_device(drm->device);
u32 bits;
int ret;
- bits = nouveau_vmmgr(drm->device)->dma_bits;
- if (nv_device_is_pci(device)) {
+ bits = nvkm_vmmgr(&drm->device)->dma_bits;
+ if (nv_device_is_pci(nvkm_device(&drm->device))) {
if (drm->agp.stat == ENABLED ||
!pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
bits = 32;
@@ -394,8 +386,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
/* VRAM init */
- drm->gem.vram_available = nouveau_fb(drm->device)->ram->size;
- drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
+ drm->gem.vram_available = drm->device.info.ram_user;
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
@@ -404,12 +395,12 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1),
- nv_device_resource_len(device, 1));
+ drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
+ nv_device_resource_len(nvkm_device(&drm->device), 1));
/* GART init */
if (drm->agp.stat != ENABLED) {
- drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
+ drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
} else {
drm->gem.gart_available = drm->agp.size;
}
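
Note: nouveau_gart_manager_new() above swaps raw card_type checks (NV_50, NV_C0, NV_D0, NV_E0) for the abstracted device-info families, with Fermi and Kepler now sharing one case. A toy sketch of the resulting mapping (invented enum values; the real code also special-cases chipset 0x50 within Tesla):

	enum toy_family { TOY_TESLA, TOY_FERMI, TOY_KEPLER };

	/* pick the tile-flag mask by generation, as the patched switch does */
	static unsigned int toy_memtype(enum toy_family family, unsigned int tile_flags)
	{
		switch (family) {
		case TOY_TESLA:
			return (tile_flags & 0x7f00) >> 8;
		case TOY_FERMI:
		case TOY_KEPLER:
			return (tile_flags & 0xff00) >> 8;
		default:
			return 0;
		}
	}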
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
new file mode 100644
index 000000000000..cb1182d7e80e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_drm.h"
+#include "nouveau_usif.h"
+
+#include <nvif/notify.h>
+#include <nvif/unpack.h>
+#include <nvif/client.h>
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
+
+struct usif_notify_p {
+ struct drm_pending_event base;
+ struct {
+ struct drm_event base;
+ u8 data[];
+ } e;
+};
+
+struct usif_notify {
+ struct list_head head;
+ atomic_t enabled;
+ u32 handle;
+ u16 reply;
+ u8 route;
+ u64 token;
+ struct usif_notify_p *p;
+};
+
+static inline struct usif_notify *
+usif_notify_find(struct drm_file *filp, u32 handle)
+{
+ struct nouveau_cli *cli = nouveau_cli(filp);
+ struct usif_notify *ntfy;
+ list_for_each_entry(ntfy, &cli->notifys, head) {
+ if (ntfy->handle == handle)
+ return ntfy;
+ }
+ return NULL;
+}
+
+static inline void
+usif_notify_dtor(struct usif_notify *ntfy)
+{
+ list_del(&ntfy->head);
+ kfree(ntfy);
+}
+
+int
+usif_notify(const void *header, u32 length, const void *data, u32 size)
+{
+ struct usif_notify *ntfy = NULL;
+ const union {
+ struct nvif_notify_rep_v0 v0;
+ } *rep = header;
+ struct drm_device *dev;
+ struct drm_file *filp;
+ unsigned long flags;
+
+ if (length == sizeof(rep->v0) && rep->v0.version == 0) {
+ if (WARN_ON(!(ntfy = (void *)(unsigned long)rep->v0.token)))
+ return NVIF_NOTIFY_DROP;
+ BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF);
+ } else
+ if (WARN_ON(1))
+ return NVIF_NOTIFY_DROP;
+
+ if (WARN_ON(!ntfy->p || ntfy->reply != (length + size)))
+ return NVIF_NOTIFY_DROP;
+ filp = ntfy->p->base.file_priv;
+ dev = filp->minor->dev;
+
+ memcpy(&ntfy->p->e.data[0], header, length);
+ memcpy(&ntfy->p->e.data[length], data, size);
+ switch (rep->v0.version) {
+ case 0: {
+ struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data;
+ rep->route = ntfy->route;
+ rep->token = ntfy->token;
+ }
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) {
+ list_add_tail(&ntfy->p->base.link, &filp->event_list);
+ filp->event_space -= ntfy->p->e.base.length;
+ }
+ wake_up_interruptible(&filp->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ atomic_set(&ntfy->enabled, 0);
+ return NVIF_NOTIFY_DROP;
+}
+
+static int
+usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_new_v0 v0;
+ } *args = data;
+ union {
+ struct nvif_notify_req_v0 v0;
+ } *req;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (usif_notify_find(f, args->v0.index))
+ return -EEXIST;
+ } else
+ return ret;
+ req = data;
+
+ if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL)))
+ return -ENOMEM;
+ atomic_set(&ntfy->enabled, 0);
+
+ if (nvif_unpack(req->v0, 0, 0, true)) {
+ ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply;
+ ntfy->route = req->v0.route;
+ ntfy->token = req->v0.token;
+ req->v0.route = NVDRM_NOTIFY_USIF;
+ req->v0.token = (unsigned long)(void *)ntfy;
+ ret = nvif_client_ioctl(client, argv, argc);
+ req->v0.token = ntfy->token;
+ req->v0.route = ntfy->route;
+ ntfy->handle = args->v0.index;
+ }
+
+ if (ret == 0)
+ list_add(&ntfy->head, &cli->notifys);
+ if (ret)
+ kfree(ntfy);
+ return ret;
+}
+
+static int
+usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_del_v0 v0;
+ } *args = data;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (!(ntfy = usif_notify_find(f, args->v0.index)))
+ return -ENOENT;
+ } else
+ return ret;
+
+ ret = nvif_client_ioctl(client, argv, argc);
+ if (ret == 0)
+ usif_notify_dtor(ntfy);
+ return ret;
+}
+
+static int
+usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_del_v0 v0;
+ } *args = data;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (!(ntfy = usif_notify_find(f, args->v0.index)))
+ return -ENOENT;
+ } else
+ return ret;
+
+ if (atomic_xchg(&ntfy->enabled, 1))
+ return 0;
+
+ ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL);
+ if (ret = -ENOMEM, !ntfy->p)
+ goto done;
+ ntfy->p->base.event = &ntfy->p->e.base;
+ ntfy->p->base.file_priv = f;
+ ntfy->p->base.pid = current->pid;
+ ntfy->p->base.destroy = (void (*)(struct drm_pending_event *))kfree;
+ ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
+ ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
+
+ ret = nvif_client_ioctl(client, argv, argc);
+done:
+ if (ret) {
+ atomic_set(&ntfy->enabled, 0);
+ kfree(ntfy->p);
+ }
+ return ret;
+}
+
+static int
+usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_put_v0 v0;
+ } *args = data;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (!(ntfy = usif_notify_find(f, args->v0.index)))
+ return -ENOENT;
+ } else
+ return ret;
+
+ ret = nvif_client_ioctl(client, argv, argc);
+ if (ret == 0 && atomic_xchg(&ntfy->enabled, 0))
+ kfree(ntfy->p);
+ return ret;
+}
+
+struct usif_object {
+ struct list_head head;
+ struct list_head ntfy;
+ u8 route;
+ u64 token;
+};
+
+static void
+usif_object_dtor(struct usif_object *object)
+{
+ list_del(&object->head);
+ kfree(object);
+}
+
+static int
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_new_v0 v0;
+ } *args = data;
+ struct usif_object *object;
+ int ret;
+
+ if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
+ return -ENOMEM;
+ list_add(&object->head, &cli->objects);
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ object->route = args->v0.route;
+ object->token = args->v0.token;
+ args->v0.route = NVDRM_OBJECT_USIF;
+ args->v0.token = (unsigned long)(void *)object;
+ ret = nvif_client_ioctl(client, argv, argc);
+ args->v0.token = object->token;
+ args->v0.route = object->route;
+ }
+
+ if (ret)
+ usif_object_dtor(object);
+ return ret;
+}
+
+int
+usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(filp);
+ struct nvif_client *client = &cli->base;
+ void *data = kmalloc(argc, GFP_KERNEL);
+ u32 size = argc;
+ union {
+ struct nvif_ioctl_v0 v0;
+ } *argv = data;
+ struct usif_object *object;
+ u8 owner;
+ int ret;
+
+ if (ret = -ENOMEM, !argv)
+ goto done;
+ if (ret = -EFAULT, copy_from_user(argv, user, size))
+ goto done;
+
+ if (nvif_unpack(argv->v0, 0, 0, true)) {
+ /* block access to objects not created via this interface */
+ owner = argv->v0.owner;
+ argv->v0.owner = NVDRM_OBJECT_USIF;
+ } else
+ goto done;
+
+ mutex_lock(&cli->mutex);
+ switch (argv->v0.type) {
+ case NVIF_IOCTL_V0_NEW:
+ /* ... except if we're creating children */
+ argv->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
+ ret = usif_object_new(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_NEW:
+ ret = usif_notify_new(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_DEL:
+ ret = usif_notify_del(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_GET:
+ ret = usif_notify_get(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_PUT:
+ ret = usif_notify_put(filp, data, size, argv, argc);
+ break;
+ default:
+ ret = nvif_client_ioctl(client, argv, argc);
+ break;
+ }
+ if (argv->v0.route == NVDRM_OBJECT_USIF) {
+ object = (void *)(unsigned long)argv->v0.token;
+ argv->v0.route = object->route;
+ argv->v0.token = object->token;
+ if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) {
+ list_del(&object->head);
+ kfree(object);
+ }
+ } else {
+ argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN;
+ argv->v0.token = 0;
+ }
+ argv->v0.owner = owner;
+ mutex_unlock(&cli->mutex);
+
+ if (copy_to_user(user, argv, argc))
+ ret = -EFAULT;
+done:
+ kfree(argv);
+ return ret;
+}
+
+void
+usif_client_fini(struct nouveau_cli *cli)
+{
+ struct usif_object *object, *otemp;
+ struct usif_notify *notify, *ntemp;
+
+ list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) {
+ usif_notify_dtor(notify);
+ }
+
+ list_for_each_entry_safe(object, otemp, &cli->objects, head) {
+ usif_object_dtor(object);
+ }
+}
+
+void
+usif_client_init(struct nouveau_cli *cli)
+{
+ INIT_LIST_HEAD(&cli->objects);
+ INIT_LIST_HEAD(&cli->notifys);
+}
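
Note: the usif layer above works by rewriting the route/token fields in the userspace-supplied ioctl arguments with kernel-owned values, issuing the ioctl, then restoring the originals before the buffer goes back to userspace. A self-contained sketch of that save/substitute/restore round trip (toy structs; uintptr_t stands in for the patch's u64 casts):

	#include <stdint.h>
	#include <stdio.h>

	struct toy_args   { uint8_t route; uint64_t token; };
	struct toy_object { uint8_t route; uint64_t token; };

	#define TOY_ROUTE_USIF 0x00	/* invented value, for illustration */

	static int toy_ioctl(struct toy_args *args)
	{
		/* the kernel side can now find the object behind the token */
		struct toy_object *object = (void *)(uintptr_t)args->token;
		return object ? 0 : -1;
	}

	static int toy_object_new(struct toy_args *args, struct toy_object *object)
	{
		int ret;

		object->route = args->route;	/* save what userspace sent */
		object->token = args->token;
		args->route = TOY_ROUTE_USIF;	/* substitute kernel routing */
		args->token = (uint64_t)(uintptr_t)object;
		ret = toy_ioctl(args);
		args->token = object->token;	/* restore before copy_to_user */
		args->route = object->route;
		return ret;
	}

	int main(void)
	{
		struct toy_object obj;
		struct toy_args args = { .route = 0x42, .token = 0xdead };
		printf("%d route=%#x\n", toy_object_new(&args, &obj), args.route);
		return 0;
	}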
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
new file mode 100644
index 000000000000..c037e3ae8c70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.h
@@ -0,0 +1,9 @@
+#ifndef __NOUVEAU_USIF_H__
+#define __NOUVEAU_USIF_H__
+
+void usif_client_init(struct nouveau_cli *);
+void usif_client_fini(struct nouveau_cli *);
+int usif_ioctl(struct drm_file *, void __user *, u32);
+int usif_notify(const void *, u32, const void *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 4f4c3fec6916..18d55d447248 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,14 +12,16 @@
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
- struct nouveau_device *device = nouveau_dev(priv);
+ struct nvif_device *device = &nouveau_drm(priv)->device;
- if (device->card_type == NV_40 && device->chipset >= 0x4c)
- nv_wr32(device, 0x088060, state);
- else if (device->chipset >= 0x40)
- nv_wr32(device, 0x088054, state);
+ if (device->info.family == NV_DEVICE_INFO_V0_CURIE &&
+ device->info.chipset >= 0x4c)
+ nvif_wr32(device, 0x088060, state);
else
- nv_wr32(device, 0x001854, state);
+ if (device->info.chipset >= 0x40)
+ nvif_wr32(device, 0x088054, state);
+ else
+ nvif_wr32(device, 0x001854, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 8fe32bbed99a..4ef602c5469d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,8 +22,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <core/object.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
@@ -141,8 +139,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_object *object;
+ struct nvif_device *device = &drm->device;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -174,35 +171,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
- device->card_type >= NV_10 ? 0x0062 : 0x0042,
- NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0062,
+ device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
+ 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
- 0x0019, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0,
+ &nfbdev->clip);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
- 0x0043, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0,
+ &nfbdev->rop);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
- 0x0044, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0,
+ &nfbdev->patt);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
- 0x004a, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0,
+ &nfbdev->gdi);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
- device->chipset >= 0x11 ? 0x009f : 0x005f,
- NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x005f,
+ device->info.chipset >= 0x11 ? 0x009f : 0x005f,
+ NULL, 0, &nfbdev->blit);
if (ret)
return ret;
@@ -212,10 +209,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvCtxSurf2D);
+ OUT_RING(chan, nfbdev->surf2d.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
- OUT_RING(chan, NvDmaFB);
- OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, chan->vram.handle);
+ OUT_RING(chan, chan->vram.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
@@ -223,12 +220,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvRop);
+ OUT_RING(chan, nfbdev->rop.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
OUT_RING(chan, 0x55);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvImagePatt);
+ OUT_RING(chan, nfbdev->patt.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
@@ -244,18 +241,18 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvClipRect);
+ OUT_RING(chan, nfbdev->clip.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
- OUT_RING(chan, NvImageBlit);
+ OUT_RING(chan, nfbdev->blit.handle);
BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
- OUT_RING(chan, NvCtxSurf2D);
+ OUT_RING(chan, nfbdev->surf2d.handle);
BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
OUT_RING(chan, 3);
- if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
+ if (device->info.chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
@@ -263,12 +260,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
- OUT_RING(chan, NvGdiRect);
+ OUT_RING(chan, nfbdev->gdi.handle);
BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
- OUT_RING(chan, NvCtxSurf2D);
+ OUT_RING(chan, nfbdev->surf2d.handle);
BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
- OUT_RING(chan, NvImagePatt);
- OUT_RING(chan, NvRop);
+ OUT_RING(chan, nfbdev->patt.handle);
+ OUT_RING(chan, nfbdev->rop.handle);
BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
OUT_RING(chan, 1);
BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 94eadd1dd10a..4484131d826a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,8 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <engine/fifo.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -43,7 +41,7 @@ nv04_fence_emit(struct nouveau_fence *fence)
int ret = RING_SPACE(chan, 2);
if (ret == 0) {
BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, fence->base.seqno);
FIRE_RING (chan);
}
return ret;
@@ -59,7 +57,7 @@ nv04_fence_sync(struct nouveau_fence *fence,
static u32
nv04_fence_read(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nouveau_fifo_chan *fifo = nvkm_fifo_chan(chan);
return atomic_read(&fifo->refcnt);
}
@@ -77,7 +75,7 @@ nv04_fence_context_new(struct nouveau_channel *chan)
{
struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (fctx) {
- nouveau_fence_context_new(&fctx->base);
+ nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv04_fence_emit;
fctx->base.sync = nv04_fence_sync;
fctx->base.read = nv04_fence_read;
@@ -107,5 +105,7 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv04_fence_destroy;
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
+ priv->base.contexts = 15;
+ priv->base.context_base = fence_context_alloc(priv->base.contexts);
return 0;
}
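
Note: the fence rework above reserves a block of cross-driver fence contexts — fence_context_alloc(n) hands back the first of n consecutive IDs, and each channel then uses context_base plus its channel id. A toy, single-threaded stand-in for that allocator (the kernel's counter is atomic):

	/* toy stand-in for fence_context_alloc() */
	static unsigned toy_fence_context_counter = 1;

	static unsigned toy_fence_context_alloc(unsigned num)
	{
		unsigned base = toy_fence_context_counter;
		toy_fence_context_counter += num;
		return base;	/* caller owns [base, base + num) */
	}

	/* usage sketch: priv->context_base = toy_fence_context_alloc(15);
	 * channel i then signals on context_base + i */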
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 06f434f03fba..737d066ffc60 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,9 +22,6 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <core/object.h>
-#include <core/class.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
@@ -36,7 +33,7 @@ nv10_fence_emit(struct nouveau_fence *fence)
int ret = RING_SPACE(chan, 2);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, fence->base.seqno);
FIRE_RING (chan);
}
return ret;
@@ -53,14 +50,18 @@ nv10_fence_sync(struct nouveau_fence *fence,
u32
nv10_fence_read(struct nouveau_channel *chan)
{
- return nv_ro32(chan->object, 0x0048);
+ return nvif_rd32(chan, 0x0048);
}
void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
+ int i;
nouveau_fence_context_del(&fctx->base);
+ for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
+ nvif_object_fini(&fctx->head[i]);
+ nvif_object_fini(&fctx->sema);
chan->fence = NULL;
kfree(fctx);
}
@@ -74,7 +75,7 @@ nv10_fence_context_new(struct nouveau_channel *chan)
if (!fctx)
return -ENOMEM;
- nouveau_fence_context_new(&fctx->base);
+ nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv10_fence_emit;
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv10_fence_sync;
@@ -105,6 +106,8 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv10_fence_destroy;
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
+ priv->base.contexts = 31;
+ priv->base.context_base = fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index e5d9204826c2..a87259f3983a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -1,12 +1,13 @@
#ifndef __NV10_FENCE_H_
#define __NV10_FENCE_H_
-#include <core/os.h>
#include "nouveau_fence.h"
#include "nouveau_bo.h"
struct nv10_fence_chan {
struct nouveau_fence_chan base;
+ struct nvif_object sema;
+ struct nvif_object head[4];
};
struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 22aa9963ea6f..6f9a1f8e2d0f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -22,8 +22,8 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <core/object.h>
-#include <core/class.h>
+#include <nvif/os.h>
+#include <nvif/class.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -33,11 +33,13 @@ int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
+ struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base);
struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx = chan->fence;
u32 value;
int ret;
- if (!mutex_trylock(&prev->cli->mutex))
+ if (!mutex_trylock(&cli->mutex))
return -EBUSY;
spin_lock(&priv->lock);
@@ -48,7 +50,7 @@ nv17_fence_sync(struct nouveau_fence *fence,
ret = RING_SPACE(prev, 5);
if (!ret) {
BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (prev, NvSema);
+ OUT_RING (prev, fctx->sema.handle);
OUT_RING (prev, 0);
OUT_RING (prev, value + 0);
OUT_RING (prev, value + 1);
@@ -57,14 +59,14 @@ nv17_fence_sync(struct nouveau_fence *fence,
if (!ret && !(ret = RING_SPACE(chan, 5))) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (chan, NvSema);
+ OUT_RING (chan, fctx->sema.handle);
OUT_RING (chan, 0);
OUT_RING (chan, value + 1);
OUT_RING (chan, value + 2);
FIRE_RING (chan);
}
- mutex_unlock(&prev->cli->mutex);
+ mutex_unlock(&cli->mutex);
return 0;
}
@@ -74,7 +76,6 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
int ret = 0;
@@ -83,20 +84,19 @@ nv17_fence_context_new(struct nouveau_channel *chan)
if (!fctx)
return -ENOMEM;
- nouveau_fence_context_new(&fctx->base);
+ nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv10_fence_emit;
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &fctx->sema);
if (ret)
nv10_fence_context_del(chan);
return ret;
@@ -124,6 +124,8 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.resume = nv17_fence_resume;
priv->base.context_new = nv17_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
+ priv->base.contexts = 31;
+ priv->base.context_base = fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
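
Note: in the nv50_display.c rework below, nv50_chan_create() walks a zero-terminated, newest-first list of class IDs and binds the first one the display core accepts, replacing the old per-generation constructor macros. A compilable sketch of that probe loop, with a toy init function in place of nvif_object_init():

	static int toy_object_init(unsigned oclass)	/* pretend only 0x827a exists */
	{
		return oclass == 0x827a ? 0 : -1;
	}

	/* try classes newest-first until one sticks, as nv50_chan_create() does */
	static int toy_chan_create(const unsigned *oclass)
	{
		while (oclass[0]) {
			int ret = toy_object_init(oclass[0]);
			if (oclass++, ret == 0)	/* same comma idiom as the patch */
				return ret;
		}
		return -38;	/* -ENOSYS */
	}

	/* usage: static const unsigned oclass[] = { 0x907a, 0x857a, 0x827a, 0x507a, 0 };
	 *        toy_chan_create(oclass) binds 0x827a. */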
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 4c534b7b04da..03949eaa629f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -28,6 +28,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
+#include <nvif/class.h>
+
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
@@ -37,15 +39,6 @@
#include "nouveau_fence.h"
#include "nv50_display.h"
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <core/class.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/i2c.h>
-
#define EVO_DMA_NR 9
#define EVO_MASTER (0x00)
@@ -60,45 +53,34 @@
#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
-#define EVO_CORE_HANDLE (0xd1500000)
-#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
-#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
-#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
- (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
-
/******************************************************************************
* EVO channel
*****************************************************************************/
struct nv50_chan {
- struct nouveau_object *user;
- u32 handle;
+ struct nvif_object user;
};
static int
-nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, struct nv50_chan *chan)
{
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
- const u32 handle = EVO_CHAN_HANDLE(bclass, head);
- int ret;
-
- ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
- oclass, data, size, &chan->user);
- if (ret)
- return ret;
-
- chan->handle = handle;
- return 0;
+ while (oclass[0]) {
+ int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head,
+ oclass[0], data, size,
+ &chan->user);
+ if (oclass++, ret == 0) {
+ nvif_object_map(&chan->user);
+ return ret;
+ }
+ }
+ return -ENOSYS;
}
static void
-nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
+nv50_chan_destroy(struct nv50_chan *chan)
{
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- if (chan->handle)
- nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+ nvif_object_fini(&chan->user);
}
/******************************************************************************
@@ -110,16 +92,70 @@ struct nv50_pioc {
};
static void
-nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
+nv50_pioc_destroy(struct nv50_pioc *pioc)
{
- nv50_chan_destroy(core, &pioc->base);
+ nv50_chan_destroy(&pioc->base);
}
static int
-nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, struct nv50_pioc *pioc)
{
- return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
+ return nv50_chan_create(disp, oclass, head, data, size, &pioc->base);
+}
+
+/******************************************************************************
+ * Cursor Immediate
+ *****************************************************************************/
+
+struct nv50_curs {
+ struct nv50_pioc base;
+};
+
+static int
+nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
+{
+ struct nv50_disp_cursor_v0 args = {
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK104_DISP_CURSOR,
+ GF110_DISP_CURSOR,
+ GT214_DISP_CURSOR,
+ G82_DISP_CURSOR,
+ NV50_DISP_CURSOR,
+ 0
+ };
+
+ return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
+ &curs->base);
+}
+
+/******************************************************************************
+ * Overlay Immediate
+ *****************************************************************************/
+
+struct nv50_oimm {
+ struct nv50_pioc base;
+};
+
+static int
+nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
+{
+ struct nv50_disp_cursor_v0 args = {
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK104_DISP_OVERLAY,
+ GF110_DISP_OVERLAY,
+ GT214_DISP_OVERLAY,
+ G82_DISP_OVERLAY,
+ NV50_DISP_OVERLAY,
+ 0
+ };
+
+ return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
+ &oimm->base);
}
/******************************************************************************
@@ -131,6 +167,9 @@ struct nv50_dmac {
dma_addr_t handle;
u32 *ptr;
+ struct nvif_object sync;
+ struct nvif_object vram;
+
/* Protects against concurrent pushbuf access to this channel, lock is
* grabbed by evo_wait (if the pushbuf reservation is successful) and
* dropped again by evo_kick. */
@@ -138,207 +177,113 @@ struct nv50_dmac {
};
static void
-nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
+ nvif_object_fini(&dmac->vram);
+ nvif_object_fini(&dmac->sync);
+
+ nv50_chan_destroy(&dmac->base);
+
if (dmac->ptr) {
- struct pci_dev *pdev = nv_device(core)->pdev;
+ struct pci_dev *pdev = nvkm_device(nvif_device(disp))->pdev;
pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
-
- nv50_chan_destroy(core, &dmac->base);
-}
-
-static int
-nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
-{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NV50_DMA_CONF0_ENABLE |
- NV50_DMA_CONF0_PART_256,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB16,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
- NV50_DMA_CONF0_PART_256,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB32,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
- NV50_DMA_CONF0_PART_256,
- }, sizeof(struct nv_dma_class), &object);
- return ret;
-}
-
-static int
-nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
-{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVC0_DMA_CONF0_ENABLE,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB16,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB32,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
- }, sizeof(struct nv_dma_class), &object);
- return ret;
-}
-
-static int
-nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
-{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVD0_DMA_CONF0_ENABLE |
- NVD0_DMA_CONF0_PAGE_LP,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB32,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
- NVD0_DMA_CONF0_PAGE_LP,
- }, sizeof(struct nv_dma_class), &object);
- return ret;
}
static int
-nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, u64 syncbuf,
struct nv50_dmac *dmac)
{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- u32 pushbuf = *(u32 *)data;
+ struct nvif_device *device = nvif_device(disp);
+ struct nv50_disp_core_channel_dma_v0 *args = data;
+ struct nvif_object pushbuf;
int ret;
mutex_init(&dmac->lock);
- dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
- &dmac->handle);
+ dmac->ptr = pci_alloc_consistent(nvkm_device(device)->pdev,
+ PAGE_SIZE, &dmac->handle);
if (!dmac->ptr)
return -ENOMEM;
- ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
- NV_DMA_FROM_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_PCI_US |
- NV_DMA_ACCESS_RD,
+ ret = nvif_object_init(nvif_object(device), NULL,
+ args->pushbuf, NV_DMA_FROM_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_PCI_US,
+ .access = NV_DMA_V0_ACCESS_RD,
.start = dmac->handle + 0x0000,
.limit = dmac->handle + 0x0fff,
- }, sizeof(struct nv_dma_class), &object);
+ }, sizeof(struct nv_dma_v0), &pushbuf);
if (ret)
return ret;
- ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+ ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base);
+ nvif_object_fini(&pushbuf);
if (ret)
return ret;
- ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = syncbuf + 0x0000,
.limit = syncbuf + 0x0fff,
- }, sizeof(struct nv_dma_class), &object);
+ }, sizeof(struct nv_dma_v0),
+ &dmac->sync);
if (ret)
return ret;
- ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = 0,
- .limit = pfb->ram->size - 1,
- }, sizeof(struct nv_dma_class), &object);
+ .limit = device->info.ram_user - 1,
+ }, sizeof(struct nv_dma_v0),
+ &dmac->vram);
if (ret)
return ret;
- if (nv_device(core)->card_type < NV_C0)
- ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
- else
- if (nv_device(core)->card_type < NV_D0)
- ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
- else
- ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
return ret;
}
+/******************************************************************************
+ * Core
+ *****************************************************************************/
+
struct nv50_mast {
struct nv50_dmac base;
};
-struct nv50_curs {
- struct nv50_pioc base;
-};
+static int
+nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
+{
+ struct nv50_disp_core_channel_dma_v0 args = {
+ .pushbuf = 0xb0007d00,
+ };
+ static const u32 oclass[] = {
+ GM107_DISP_CORE_CHANNEL_DMA,
+ GK110_DISP_CORE_CHANNEL_DMA,
+ GK104_DISP_CORE_CHANNEL_DMA,
+ GF110_DISP_CORE_CHANNEL_DMA,
+ GT214_DISP_CORE_CHANNEL_DMA,
+ GT206_DISP_CORE_CHANNEL_DMA,
+ GT200_DISP_CORE_CHANNEL_DMA,
+ G82_DISP_CORE_CHANNEL_DMA,
+ NV50_DISP_CORE_CHANNEL_DMA,
+ 0
+ };
+
+ return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf,
+ &core->base);
+}
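
The zero-terminated oclass[] tables introduced above are ordered newest chip first. nv50_dmac_create() passes the table through to nv50_chan_create() (earlier in this patch), which can be expected to walk it until it reaches the first class the running kernel recognises. A minimal sketch of that probing loop, assuming the nvif_object_init() signature used throughout this diff; the helper name and handle construction are illustrative only:

	/* sketch: try each display class, newest first, until one exists */
	static int
	chan_probe_sketch(struct nvif_object *disp, const u32 *oclass, u8 head,
			  void *data, u32 size, struct nv50_chan *chan)
	{
		while (oclass[0]) {
			int ret = nvif_object_init(disp, NULL,
						   (oclass[0] << 16) | head,
						   oclass[0], data, size,
						   &chan->user);
			if (ret == 0)
				return 0;	/* newest supported class wins */
			oclass++;		/* fall back to an older class */
		}
		return -ENOSYS;
	}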
+
+/******************************************************************************
+ * Base
+ *****************************************************************************/
struct nv50_sync {
struct nv50_dmac base;
@@ -346,13 +291,58 @@ struct nv50_sync {
u32 data;
};
+static int
+nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
+ struct nv50_sync *base)
+{
+ struct nv50_disp_base_channel_dma_v0 args = {
+ .pushbuf = 0xb0007c00 | head,
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK110_DISP_BASE_CHANNEL_DMA,
+ GK104_DISP_BASE_CHANNEL_DMA,
+ GF110_DISP_BASE_CHANNEL_DMA,
+ GT214_DISP_BASE_CHANNEL_DMA,
+ GT200_DISP_BASE_CHANNEL_DMA,
+ G82_DISP_BASE_CHANNEL_DMA,
+ NV50_DISP_BASE_CHANNEL_DMA,
+ 0
+ };
+
+ return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+ syncbuf, &base->base);
+}
+
+/******************************************************************************
+ * Overlay
+ *****************************************************************************/
+
struct nv50_ovly {
struct nv50_dmac base;
};
-struct nv50_oimm {
- struct nv50_pioc base;
-};
+static int
+nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
+ struct nv50_ovly *ovly)
+{
+ struct nv50_disp_overlay_channel_dma_v0 args = {
+ .pushbuf = 0xb0007e00 | head,
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK104_DISP_OVERLAY_CONTROL_DMA,
+ GF110_DISP_OVERLAY_CONTROL_DMA,
+ GT214_DISP_OVERLAY_CHANNEL_DMA,
+ GT200_DISP_OVERLAY_CHANNEL_DMA,
+ G82_DISP_OVERLAY_CHANNEL_DMA,
+ NV50_DISP_OVERLAY_CHANNEL_DMA,
+ 0
+ };
+
+ return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+ syncbuf, &ovly->base);
+}
struct nv50_head {
struct nouveau_crtc base;
@@ -369,13 +359,19 @@ struct nv50_head {
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
-#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+#define nv50_vers(c) nv50_chan(c)->user.oclass
+
+struct nv50_fbdma {
+ struct list_head head;
+ struct nvif_object core;
+ struct nvif_object base[4];
+};
struct nv50_disp {
- struct nouveau_object *core;
+ struct nvif_object *disp;
struct nv50_mast mast;
- u32 modeset;
+ struct list_head fbdma;
struct nouveau_bo *sync;
};
@@ -401,16 +397,16 @@ static u32 *
evo_wait(void *evoc, int nr)
{
struct nv50_dmac *dmac = evoc;
- u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+ u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
- nv_wo32(dmac->base.user, 0x0000, 0x00000000);
- if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
+ if (!nvkm_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
mutex_unlock(&dmac->lock);
- NV_ERROR(dmac->base.user, "channel stalled\n");
+ nv_error(nvkm_object(&dmac->base.user), "channel stalled\n");
return NULL;
}
@@ -424,7 +420,7 @@ static void
evo_kick(u32 *push, void *evoc)
{
struct nv50_dmac *dmac = evoc;
- nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+ nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
}
@@ -443,7 +439,7 @@ evo_sync_wait(void *data)
static int
evo_sync(struct drm_device *dev)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_mast *mast = nv50_mast(dev);
u32 *push = evo_wait(mast, 8);
@@ -455,7 +451,7 @@ evo_sync(struct drm_device *dev)
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_kick(push, mast);
- if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+ if (nv_wait_cb(nvkm_device(device), evo_sync_wait, disp->sync))
return 0;
}
@@ -490,7 +486,7 @@ nv50_display_flip_wait(void *data)
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nouveau_device *device = nouveau_dev(crtc->dev);
+ struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
struct nv50_display_flip flip = {
.disp = nv50_disp(crtc->dev),
.chan = nv50_sync(crtc),
@@ -510,7 +506,7 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
evo_kick(push, flip.chan);
}
- nv_wait_cb(device, nv50_display_flip_wait, &flip);
+ nv_wait_cb(nvkm_device(device), nv50_display_flip_wait, &flip);
}
int
@@ -534,7 +530,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (unlikely(push == NULL))
return -EBUSY;
- if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+ if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) {
ret = RING_SPACE(chan, 8);
if (ret)
return ret;
@@ -548,14 +544,14 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->addr);
OUT_RING (chan, sync->data);
} else
- if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) {
u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 12);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, chan->vram.handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(addr ^ 0x10));
OUT_RING (chan, lower_32_bits(addr ^ 0x10));
@@ -606,16 +602,16 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_data(push, sync->addr);
evo_data(push, sync->data++);
evo_data(push, sync->data);
- evo_data(push, NvEvoSync);
+ evo_data(push, sync->base.sync.handle);
evo_mthd(push, 0x00a0, 2);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_dma);
+ evo_data(push, nv_fb->r_handle);
evo_mthd(push, 0x0110, 2);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
- if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+ if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
evo_mthd(push, 0x0800, 5);
evo_data(push, nv_fb->nvbo->bo.offset >> 8);
evo_data(push, 0);
@@ -667,11 +663,11 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
push = evo_wait(mast, 4);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
evo_data(push, mode);
} else
- if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
evo_data(push, mode);
} else {
@@ -762,7 +758,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
push = evo_wait(mast, 8);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
/*XXX: SCALE_CTRL_ACTIVE??? */
evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
evo_data(push, (oY << 16) | oX);
@@ -807,7 +803,7 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
evo_data(push, (hue << 20) | (vib << 8));
} else {
@@ -835,7 +831,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
evo_data(push, nvfb->nvbo->bo.offset >> 8);
evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
@@ -844,9 +840,9 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_data(push, nvfb->r_format);
evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
evo_data(push, (y << 16) | x);
- if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->r_dma);
+ evo_data(push, nvfb->r_handle);
}
} else {
evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
@@ -855,7 +851,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_data(push, (fb->height << 16) | fb->width);
evo_data(push, nvfb->r_pitch);
evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_dma);
+ evo_data(push, nvfb->r_handle);
evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
evo_data(push, (y << 16) | x);
}
@@ -867,7 +863,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_kick(push, mast);
}
- nv_crtc->fb.tile_flags = nvfb->r_dma;
+ nv_crtc->fb.handle = nvfb->r_handle;
return 0;
}
@@ -877,23 +873,23 @@ nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
u32 *push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
} else {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
evo_data(push, 0x85000000);
evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
}
evo_kick(push, mast);
}
@@ -905,11 +901,11 @@ nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
u32 *push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x05000000);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x05000000);
evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
@@ -960,13 +956,13 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
push = evo_wait(mast, 6);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x40000000);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
@@ -997,31 +993,31 @@ nv50_crtc_commit(struct drm_crtc *crtc)
push = evo_wait(mast, 32);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, NvEvoVRAM_LP);
+ evo_data(push, nv_crtc->fb.handle);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0xc0000000);
evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
+ evo_data(push, nv_crtc->fb.handle);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0xc0000000);
evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
} else {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
+ evo_data(push, nv_crtc->fb.handle);
evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
evo_data(push, 0x83000000);
evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0xffffff00);
}
@@ -1099,7 +1095,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
push = evo_wait(mast, 64);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x00800000 | mode->clock);
evo_data(push, (ilace == 2) ? 2 : 0);
@@ -1192,7 +1188,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
u16 g = nv_crtc->lut.g[i] >> 2;
u16 b = nv_crtc->lut.b[i] >> 2;
- if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+ if (disp->disp->oclass < GF110_DISP) {
writew(r + 0x0000, lut + (i * 0x08) + 0);
writew(g + 0x0000, lut + (i * 0x08) + 2);
writew(b + 0x0000, lut + (i * 0x08) + 4);
@@ -1259,8 +1255,8 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct nv50_curs *curs = nv50_curs(crtc);
struct nv50_chan *chan = nv50_chan(curs);
- nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
- nv_wo32(chan->user, 0x0080, 0x00000000);
+ nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
+ nvif_wr32(&chan->user, 0x0080, 0x00000000);
return 0;
}
@@ -1287,11 +1283,16 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
+ struct nv50_fbdma *fbdma;
+
+ list_for_each_entry(fbdma, &disp->fbdma, head) {
+ nvif_object_fini(&fbdma->base[nv_crtc->index]);
+ }
- nv50_dmac_destroy(disp->core, &head->ovly.base);
- nv50_pioc_destroy(disp->core, &head->oimm.base);
- nv50_dmac_destroy(disp->core, &head->sync.base);
- nv50_pioc_destroy(disp->core, &head->curs.base);
+ nv50_dmac_destroy(&head->ovly.base, disp->disp);
+ nv50_pioc_destroy(&head->oimm.base);
+ nv50_dmac_destroy(&head->sync.base, disp->disp);
+ nv50_pioc_destroy(&head->curs.base);
/*XXX: this shouldn't be necessary, but the core doesn't call
* disconnect() during the cleanup paths
@@ -1346,7 +1347,7 @@ nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
}
static int
-nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+nv50_crtc_create(struct drm_device *dev, int index)
{
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
@@ -1395,11 +1396,7 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
nv50_crtc_lut_load(crtc);
/* allocate cursor resources */
- ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
- &(struct nv50_display_curs_class) {
- .head = index,
- }, sizeof(struct nv50_display_curs_class),
- &head->curs.base);
+ ret = nv50_curs_create(disp->disp, index, &head->curs);
if (ret)
goto out;
@@ -1420,12 +1417,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
goto out;
/* allocate page flip / sync resources */
- ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
- &(struct nv50_display_sync_class) {
- .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
- .head = index,
- }, sizeof(struct nv50_display_sync_class),
- disp->sync->bo.offset, &head->sync.base);
+ ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
+ &head->sync);
if (ret)
goto out;
@@ -1433,20 +1426,12 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
head->sync.data = 0x00000000;
/* allocate overlay resources */
- ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
- &(struct nv50_display_oimm_class) {
- .head = index,
- }, sizeof(struct nv50_display_oimm_class),
- &head->oimm.base);
+ ret = nv50_oimm_create(disp->disp, index, &head->oimm);
if (ret)
goto out;
- ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
- &(struct nv50_display_ovly_class) {
- .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
- .head = index,
- }, sizeof(struct nv50_display_ovly_class),
- disp->sync->bo.offset, &head->ovly.base);
+ ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset,
+ &head->ovly);
if (ret)
goto out;
@@ -1464,16 +1449,23 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- dpms_ctrl = 0x00000000;
- if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000001;
- if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000004;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_dac_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_DAC_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = 1,
+ .pwr.data = 1,
+ .pwr.vsync = (mode != DRM_MODE_DPMS_SUSPEND &&
+ mode != DRM_MODE_DPMS_OFF),
+ .pwr.hsync = (mode != DRM_MODE_DPMS_STANDBY &&
+ mode != DRM_MODE_DPMS_OFF),
+ };
- nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
static bool
@@ -1514,7 +1506,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
push = evo_wait(mast, 8);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
u32 syncs = 0x00000000;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1563,7 +1555,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
push = evo_wait(mast, 4);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0400 + (or * 0x080), 1);
evo_data(push, 0x00000000);
} else {
@@ -1580,14 +1572,25 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- int ret, or = nouveau_encoder(encoder)->or;
- u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
- if (load == 0)
- load = 340;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_dac_load_v0 load;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
+ int ret;
+
+ args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
+ if (args.load.data == 0)
+ args.load.data = 340;
- ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
- if (ret || !load)
+ ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ if (ret || !args.load.load)
return connector_status_disconnected;
return connector_status_connected;
@@ -1619,7 +1622,7 @@ static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type = DRM_MODE_ENCODER_DAC;
@@ -1650,16 +1653,25 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ u8 data[sizeof(nv_connector->base.eld)];
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
- nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
- nv_connector->base.eld,
- nv_connector->base.eld[2] * 4);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
static void
@@ -1667,8 +1679,17 @@ nv50_audio_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
- nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
/******************************************************************************
@@ -1679,10 +1700,20 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
- const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
- u32 rekey = 56; /* binary driver, and tegra constant */
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ .pwr.state = 1,
+		.pwr.rekey = 56, /* constant used by both the binary driver and tegra */
+ };
+ struct nouveau_connector *nv_connector;
u32 max_ac_packet;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
@@ -1690,14 +1721,11 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
return;
max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
+ max_ac_packet -= args.pwr.rekey;
max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
- NV84_DISP_SOR_HDMI_PWR_STATE_ON |
- (max_ac_packet << 16) | rekey);
+ args.pwr.max_ac_packet = max_ac_packet / 32;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
nv50_audio_mode_set(encoder, mode);
}
@@ -1706,11 +1734,20 @@ nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ };
nv50_audio_disconnect(encoder);
- nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
/******************************************************************************
@@ -1720,10 +1757,29 @@ static void
nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = mode == DRM_MODE_DPMS_ON,
+ };
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_dp_pwr_v0 pwr;
+ } link = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = mode == DRM_MODE_DPMS_ON,
+ };
struct drm_device *dev = encoder->dev;
- struct nv50_disp *disp = nv50_disp(dev);
struct drm_encoder *partner;
- u32 mthd;
nv_encoder->last_dpms = mode;
@@ -1741,18 +1797,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
}
- mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
- mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
- mthd |= nv_encoder->or;
-
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- nv_call(disp->core, NV50_DISP_SOR_PWR | mthd, 1);
- mthd |= NV94_DISP_SOR_DP_PWR;
+ args.pwr.state = 1;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ nvif_mthd(disp->disp, 0, &link, sizeof(link));
} else {
- mthd |= NV50_DISP_SOR_PWR;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-
- nv_call(disp->core, mthd, (mode == DRM_MODE_DPMS_ON));
}
static bool
@@ -1781,7 +1832,7 @@ nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
evo_data(push, (nv_encoder->ctrl = temp));
} else {
@@ -1817,15 +1868,24 @@ static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
struct drm_display_mode *mode)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_lvds_script_v0 lvds;
+ } lvds = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
- u32 lvds = 0, mask, ctrl;
+ u32 mask, ctrl;
u8 owner = 1 << nv_crtc->index;
u8 proto = 0xf;
u8 depth = 0x0;
@@ -1851,31 +1911,31 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
- lvds |= 0x0100;
+ lvds.lvds.script |= 0x0100;
if (bios->fp.if_is_24bit)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
} else {
if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
- lvds |= 0x0100;
+ lvds.lvds.script |= 0x0100;
} else
if (mode->clock >= bios->fp.duallink_transition_clk) {
- lvds |= 0x0100;
+ lvds.lvds.script |= 0x0100;
}
- if (lvds & 0x0100) {
+ if (lvds.lvds.script & 0x0100) {
if (bios->fp.strapless_is_24bit & 2)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
} else {
if (bios->fp.strapless_is_24bit & 1)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
}
if (nv_connector->base.display_info.bpc == 8)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
}
- nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+ nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
if (nv_connector->base.display_info.bpc == 6) {
@@ -1902,7 +1962,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
- if (nv50_vers(mast) >= NVD0_DISP_CLASS) {
+ if (nv50_vers(mast) >= GF110_DISP) {
u32 *push = evo_wait(mast, 3);
if (push) {
u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
@@ -1961,7 +2021,7 @@ static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
@@ -2002,9 +2062,19 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or;
- u32 ctrl = (mode == DRM_MODE_DPMS_ON);
- nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_pior_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_PIOR_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = mode == DRM_MODE_DPMS_ON,
+ .pwr.type = nv_encoder->dcb->type,
+ };
+
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
static bool
@@ -2067,7 +2137,7 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
push = evo_wait(mast, 8);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
u32 ctrl = (depth << 16) | (proto << 8) | owner;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl |= 0x00001000;
@@ -2096,7 +2166,7 @@ nv50_pior_disconnect(struct drm_encoder *encoder)
push = evo_wait(mast, 4);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0700 + (or * 0x040), 1);
evo_data(push, 0x00000000);
}
@@ -2132,7 +2202,7 @@ static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_i2c_port *ddc = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
@@ -2169,8 +2239,151 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
}
/******************************************************************************
+ * Framebuffer
+ *****************************************************************************/
+
+static void
+nv50_fbdma_fini(struct nv50_fbdma *fbdma)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
+ nvif_object_fini(&fbdma->base[i]);
+ nvif_object_fini(&fbdma->core);
+ list_del(&fbdma->head);
+ kfree(fbdma);
+}
+
+static int
+nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_mast *mast = nv50_mast(dev);
+ struct __attribute__ ((packed)) {
+ struct nv_dma_v0 base;
+ union {
+ struct nv50_dma_v0 nv50;
+ struct gf100_dma_v0 gf100;
+ struct gf110_dma_v0 gf110;
+ };
+ } args = {};
+ struct nv50_fbdma *fbdma;
+ struct drm_crtc *crtc;
+ u32 size = sizeof(args.base);
+ int ret;
+
+ list_for_each_entry(fbdma, &disp->fbdma, head) {
+ if (fbdma->core.handle == name)
+ return 0;
+ }
+
+ fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
+ if (!fbdma)
+ return -ENOMEM;
+ list_add(&fbdma->head, &disp->fbdma);
+
+ args.base.target = NV_DMA_V0_TARGET_VRAM;
+ args.base.access = NV_DMA_V0_ACCESS_RDWR;
+ args.base.start = offset;
+ args.base.limit = offset + length - 1;
+
+ if (drm->device.info.chipset < 0x80) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ size += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xc0) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ args.nv50.kind = kind;
+ size += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xd0) {
+ args.gf100.kind = kind;
+ size += sizeof(args.gf100);
+ } else {
+ args.gf110.page = GF110_DMA_V0_PAGE_LP;
+ args.gf110.kind = kind;
+ size += sizeof(args.gf110);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nv50_head *head = nv50_head(crtc);
+ int ret = nvif_object_init(&head->sync.base.base.user, NULL,
+ name, NV_DMA_IN_MEMORY, &args, size,
+ &fbdma->base[head->base.index]);
+ if (ret) {
+ nv50_fbdma_fini(fbdma);
+ return ret;
+ }
+ }
+
+ ret = nvif_object_init(&mast->base.base.user, NULL, name,
+ NV_DMA_IN_MEMORY, &args, size,
+ &fbdma->core);
+ if (ret) {
+ nv50_fbdma_fini(fbdma);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+nv50_fb_dtor(struct drm_framebuffer *fb)
+{
+}
+
+static int
+nv50_fb_ctor(struct drm_framebuffer *fb)
+{
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
+ struct nouveau_bo *nvbo = nv_fb->nvbo;
+ struct nv50_disp *disp = nv50_disp(fb->dev);
+ u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
+ u8 tile = nvbo->tile_mode;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+ NV_ERROR(drm, "framebuffer requires contiguous bo\n");
+ return -EINVAL;
+ }
+
+ if (drm->device.info.chipset >= 0xc0)
+ tile >>= 4; /* yep.. */
+
+ switch (fb->depth) {
+ case 8: nv_fb->r_format = 0x1e00; break;
+ case 15: nv_fb->r_format = 0xe900; break;
+ case 16: nv_fb->r_format = 0xe800; break;
+ case 24:
+ case 32: nv_fb->r_format = 0xcf00; break;
+ case 30: nv_fb->r_format = 0xd100; break;
+ default:
+ NV_ERROR(drm, "unknown depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ if (disp->disp->oclass < G82_DISP) {
+ nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
+ (fb->pitches[0] | 0x00100000);
+ nv_fb->r_format |= kind << 16;
+ } else
+ if (disp->disp->oclass < GF110_DISP) {
+ nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
+ (fb->pitches[0] | 0x00100000);
+ } else {
+ nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
+ (fb->pitches[0] | 0x01000000);
+ }
+ nv_fb->r_handle = 0xffff0000 | kind;
+
+ return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
+ drm->device.info.ram_user, kind);
+}
+
+/******************************************************************************
* Init
*****************************************************************************/
+
void
nv50_display_fini(struct drm_device *dev)
{
@@ -2193,7 +2406,7 @@ nv50_display_init(struct drm_device *dev)
}
evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
+ evo_data(push, nv50_mast(dev)->base.sync.handle);
evo_kick(push, nv50_mast(dev));
return 0;
}
@@ -2202,8 +2415,13 @@ void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_fbdma *fbdma, *fbtmp;
+
+ list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
+ nv50_fbdma_fini(fbdma);
+ }
- nv50_dmac_destroy(disp->core, &disp->mast.base);
+ nv50_dmac_destroy(&disp->mast.base, disp->disp);
nouveau_bo_unmap(disp->sync);
if (disp->sync)
@@ -2217,7 +2435,7 @@ nv50_display_destroy(struct drm_device *dev)
int
nv50_display_create(struct drm_device *dev)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *tmp;
@@ -2228,12 +2446,15 @@ nv50_display_create(struct drm_device *dev)
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
+ INIT_LIST_HEAD(&disp->fbdma);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
- disp->core = nouveau_display(dev)->core;
+ nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
+ nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
+ disp->disp = &nouveau_display(dev)->disp;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2253,22 +2474,19 @@ nv50_display_create(struct drm_device *dev)
goto out;
/* allocate master evo channel */
- ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
- &(struct nv50_display_mast_class) {
- .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
- }, sizeof(struct nv50_display_mast_class),
- disp->sync->bo.offset, &disp->mast.base);
+ ret = nv50_core_create(disp->disp, disp->sync->bo.offset,
+ &disp->mast);
if (ret)
goto out;
/* create crtc objects to represent the hw heads */
- if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
- crtcs = nv_rd32(device, 0x022448);
+ if (disp->disp->oclass >= GF110_DISP)
+ crtcs = nvif_rd32(device, 0x022448);
else
crtcs = 2;
for (i = 0; i < crtcs; i++) {
- ret = nv50_crtc_create(dev, disp->core, i);
+ ret = nv50_crtc_create(dev, i);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 52068a0910dc..394c89abcc97 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -154,7 +154,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
- struct nouveau_object *object;
int ret, format;
switch (info->var.bits_per_pixel) {
@@ -184,8 +183,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
- 0x502d, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0,
+ &nfbdev->twod);
if (ret)
return ret;
@@ -196,11 +195,11 @@ nv50_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
- OUT_RING(chan, Nv2D);
+ OUT_RING(chan, nfbdev->twod.handle);
BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
- OUT_RING(chan, NvDmaFB);
- OUT_RING(chan, NvDmaFB);
- OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, chan->vram.handle);
+ OUT_RING(chan, chan->vram.handle);
+ OUT_RING(chan, chan->vram.handle);
BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
OUT_RING(chan, 0);
BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 0ee363840035..08fad3668a1c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -22,8 +22,8 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <core/object.h>
-#include <core/class.h>
+#include <nvif/os.h>
+#include <nvif/class.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -38,7 +38,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
int ret, i;
@@ -47,20 +46,19 @@ nv50_fence_context_new(struct nouveau_channel *chan)
if (!fctx)
return -ENOMEM;
- nouveau_fence_context_new(&fctx->base);
+ nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv10_fence_emit;
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &fctx->sema);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
@@ -68,15 +66,14 @@ nv50_fence_context_new(struct nouveau_channel *chan)
u32 start = bo->bo.mem.start * PAGE_SIZE;
u32 limit = start + bo->bo.mem.size - 1;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvEvoSema0 + i, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
+ NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &fctx->head[i]);
}
if (ret)
@@ -98,6 +95,8 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.resume = nv17_fence_resume;
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
+ priv->base.contexts = 127;
+ priv->base.context_base = fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 9fd475c89820..a2f28082c272 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,12 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/class.h>
-
-#include <engine/fifo.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -47,7 +41,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
int ret = RING_SPACE(chan, 8);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, chan->vram.handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
OUT_RING (chan, upper_32_bits(virtual));
OUT_RING (chan, lower_32_bits(virtual));
@@ -65,7 +59,7 @@ nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, chan->vram.handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(virtual));
OUT_RING (chan, lower_32_bits(virtual));
@@ -81,15 +75,14 @@ nv84_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct nv84_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- u64 addr = fifo->chid * 16;
+ u64 addr = chan->chid * 16;
if (fence->sysmem)
addr += fctx->vma_gart.offset;
else
addr += fctx->vma.offset;
- return fctx->base.emit32(chan, addr, fence->sequence);
+ return fctx->base.emit32(chan, addr, fence->base.seqno);
}
static int
@@ -97,23 +90,21 @@ nv84_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
struct nv84_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
- u64 addr = fifo->chid * 16;
+ u64 addr = prev->chid * 16;
if (fence->sysmem)
addr += fctx->vma_gart.offset;
else
addr += fctx->vma.offset;
- return fctx->base.sync32(chan, addr, fence->sequence);
+ return fctx->base.sync32(chan, addr, fence->base.seqno);
}
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
struct nv84_fence_priv *priv = chan->drm->fence;
- return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
+ return nouveau_bo_rd32(priv->bo, chan->chid * 16/4);
}
static void
@@ -139,8 +130,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nouveau_client *client = nouveau_client(fifo);
+ struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
int ret, i;
@@ -149,26 +139,27 @@ nv84_fence_context_new(struct nouveau_channel *chan)
if (!fctx)
return -ENOMEM;
- nouveau_fence_context_new(&fctx->base);
+ nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv84_fence_emit;
fctx->base.sync = nv84_fence_sync;
fctx->base.read = nv84_fence_read;
fctx->base.emit32 = nv84_fence_emit32;
fctx->base.sync32 = nv84_fence_sync32;
+ fctx->base.sequence = nv84_fence_read(chan);
- ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+ ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
- ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+ ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+ ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
}
- nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
+ nouveau_bo_wr32(priv->bo, chan->chid * 16/4, 0x00000000);
if (ret)
nv84_fence_context_del(chan);
@@ -178,13 +169,12 @@ nv84_fence_context_new(struct nouveau_channel *chan)
static bool
nv84_fence_suspend(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nv84_fence_priv *priv = drm->fence;
int i;
- priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+ priv->suspend = vmalloc(priv->base.contexts * sizeof(u32));
if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
+ for (i = 0; i < priv->base.contexts; i++)
priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
}
@@ -194,12 +184,11 @@ nv84_fence_suspend(struct nouveau_drm *drm)
static void
nv84_fence_resume(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nv84_fence_priv *priv = drm->fence;
int i;
if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
+ for (i = 0; i < priv->base.contexts; i++)
nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
vfree(priv->suspend);
priv->suspend = NULL;
@@ -225,7 +214,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
int
nv84_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
struct nv84_fence_priv *priv;
int ret;
@@ -239,10 +228,11 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
- init_waitqueue_head(&priv->base.waiting);
+ priv->base.contexts = pfifo->max + 1;
+ priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
- ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -256,7 +246,7 @@ nv84_fence_create(struct nouveau_drm *drm)
}
if (ret == 0)
- ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
TTM_PL_FLAG_TT, 0, 0, NULL,
&priv->bo_gart);
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 9dcd30f3e1e0..61246677e8dc 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -154,11 +154,10 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
- struct nouveau_object *object;
int ret, format;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
- 0x902d, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0,
+ &nfbdev->twod);
if (ret)
return ret;
@@ -197,7 +196,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
- OUT_RING (chan, 0x0000902d);
+ OUT_RING (chan, nfbdev->twod.handle);
BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 9566267fbc42..becf19abda2d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,12 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/class.h>
-
-#include <engine/fifo.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
new file mode 100644
index 000000000000..573491f84792
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -0,0 +1,558 @@
+#ifndef __NVIF_CLASS_H__
+#define __NVIF_CLASS_H__
+
+/*******************************************************************************
+ * class identifiers
+ ******************************************************************************/
+
+/* the classes below match nvidia-assigned (either hw or sw) class numbers */
+#define NV_DEVICE 0x00000080
+
+#define NV_DMA_FROM_MEMORY 0x00000002
+#define NV_DMA_TO_MEMORY 0x00000003
+#define NV_DMA_IN_MEMORY 0x0000003d
+
+#define NV04_DISP 0x00000046
+
+#define NV03_CHANNEL_DMA 0x0000006b
+#define NV10_CHANNEL_DMA 0x0000006e
+#define NV17_CHANNEL_DMA 0x0000176e
+#define NV40_CHANNEL_DMA 0x0000406e
+#define NV50_CHANNEL_DMA 0x0000506e
+#define G82_CHANNEL_DMA 0x0000826e
+
+#define NV50_CHANNEL_GPFIFO 0x0000506f
+#define G82_CHANNEL_GPFIFO 0x0000826f
+#define FERMI_CHANNEL_GPFIFO 0x0000906f
+#define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f
+
+#define NV50_DISP 0x00005070
+#define G82_DISP 0x00008270
+#define GT200_DISP 0x00008370
+#define GT214_DISP 0x00008570
+#define GT206_DISP 0x00008870
+#define GF110_DISP 0x00009070
+#define GK104_DISP 0x00009170
+#define GK110_DISP 0x00009270
+#define GM107_DISP 0x00009470
+
+#define NV50_DISP_CURSOR 0x0000507a
+#define G82_DISP_CURSOR 0x0000827a
+#define GT214_DISP_CURSOR 0x0000857a
+#define GF110_DISP_CURSOR 0x0000907a
+#define GK104_DISP_CURSOR 0x0000917a
+
+#define NV50_DISP_OVERLAY 0x0000507b
+#define G82_DISP_OVERLAY 0x0000827b
+#define GT214_DISP_OVERLAY 0x0000857b
+#define GF110_DISP_OVERLAY 0x0000907b
+#define GK104_DISP_OVERLAY 0x0000917b
+
+#define NV50_DISP_BASE_CHANNEL_DMA 0x0000507c
+#define G82_DISP_BASE_CHANNEL_DMA 0x0000827c
+#define GT200_DISP_BASE_CHANNEL_DMA 0x0000837c
+#define GT214_DISP_BASE_CHANNEL_DMA 0x0000857c
+#define GF110_DISP_BASE_CHANNEL_DMA 0x0000907c
+#define GK104_DISP_BASE_CHANNEL_DMA 0x0000917c
+#define GK110_DISP_BASE_CHANNEL_DMA 0x0000927c
+
+#define NV50_DISP_CORE_CHANNEL_DMA 0x0000507d
+#define G82_DISP_CORE_CHANNEL_DMA 0x0000827d
+#define GT200_DISP_CORE_CHANNEL_DMA 0x0000837d
+#define GT214_DISP_CORE_CHANNEL_DMA 0x0000857d
+#define GT206_DISP_CORE_CHANNEL_DMA 0x0000887d
+#define GF110_DISP_CORE_CHANNEL_DMA 0x0000907d
+#define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d
+#define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d
+#define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d
+
+#define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e
+#define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e
+#define GT200_DISP_OVERLAY_CHANNEL_DMA 0x0000837e
+#define GT214_DISP_OVERLAY_CHANNEL_DMA 0x0000857e
+#define GF110_DISP_OVERLAY_CONTROL_DMA 0x0000907e
+#define GK104_DISP_OVERLAY_CONTROL_DMA 0x0000917e
+
+#define FERMI_A 0x00009097
+#define FERMI_B 0x00009197
+#define FERMI_C 0x00009297
+
+#define KEPLER_A 0x0000a097
+#define KEPLER_B 0x0000a197
+#define KEPLER_C 0x0000a297
+
+#define MAXWELL_A 0x0000b097
+
+#define FERMI_COMPUTE_A 0x000090c0
+#define FERMI_COMPUTE_B 0x000091c0
+
+#define KEPLER_COMPUTE_A 0x0000a0c0
+#define KEPLER_COMPUTE_B 0x0000a1c0
+
+#define MAXWELL_COMPUTE_A 0x0000b0c0
+
+
+/*******************************************************************************
+ * client
+ ******************************************************************************/
+
+#define NV_CLIENT_DEVLIST 0x00
+
+struct nv_client_devlist_v0 {
+ __u8 version;
+ __u8 count;
+ __u8 pad02[6];
+ __u64 device[];
+};
+
+
+/*******************************************************************************
+ * device
+ ******************************************************************************/
+
+struct nv_device_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 device; /* device identifier, ~0 for client default */
+#define NV_DEVICE_V0_DISABLE_IDENTIFY 0x0000000000000001ULL
+#define NV_DEVICE_V0_DISABLE_MMIO 0x0000000000000002ULL
+#define NV_DEVICE_V0_DISABLE_VBIOS 0x0000000000000004ULL
+#define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL
+#define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL
+#define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL
+#define NV_DEVICE_V0_DISABLE_GRAPH 0x0000000100000000ULL
+#define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL
+#define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL
+#define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL
+#define NV_DEVICE_V0_DISABLE_CRYPT 0x0000001000000000ULL
+#define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL
+#define NV_DEVICE_V0_DISABLE_PPP 0x0000004000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY0 0x0000008000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL
+#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
+#define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL
+ __u64 disable; /* disable particular subsystems */
+ __u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
+};
+
+#define NV_DEVICE_V0_INFO 0x00
+
+struct nv_device_info_v0 {
+ __u8 version;
+#define NV_DEVICE_INFO_V0_IGP 0x00
+#define NV_DEVICE_INFO_V0_PCI 0x01
+#define NV_DEVICE_INFO_V0_AGP 0x02
+#define NV_DEVICE_INFO_V0_PCIE 0x03
+#define NV_DEVICE_INFO_V0_SOC 0x04
+ __u8 platform;
+ __u16 chipset; /* from NV_PMC_BOOT_0 */
+ __u8 revision; /* from NV_PMC_BOOT_0 */
+#define NV_DEVICE_INFO_V0_TNT 0x01
+#define NV_DEVICE_INFO_V0_CELSIUS 0x02
+#define NV_DEVICE_INFO_V0_KELVIN 0x03
+#define NV_DEVICE_INFO_V0_RANKINE 0x04
+#define NV_DEVICE_INFO_V0_CURIE 0x05
+#define NV_DEVICE_INFO_V0_TESLA 0x06
+#define NV_DEVICE_INFO_V0_FERMI 0x07
+#define NV_DEVICE_INFO_V0_KEPLER 0x08
+#define NV_DEVICE_INFO_V0_MAXWELL 0x09
+ __u8 family;
+ __u8 pad06[2];
+ __u64 ram_size;
+ __u64 ram_user;
+};
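
The info fields consumed elsewhere in this patch (device->info.chipset in nv50_fbdma_init(), device->info.ram_user in nv50_dmac_create()) are presumably filled in by issuing NV_DEVICE_V0_INFO against the device object. A hedged sketch of such a query, reusing the nvif_mthd() call pattern this diff introduces; treating &device->base as the device's underlying nvif_object is an assumption:

	/* sketch (assumption): refresh the cached device info */
	struct nv_device_info_v0 info = { .version = 0 };
	int ret = nvif_mthd(&device->base, NV_DEVICE_V0_INFO,
			    &info, sizeof(info));
	if (ret == 0)
		device->info = info;	/* chipset, family, ram_size, ram_user */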
+
+
+/*******************************************************************************
+ * context dma
+ ******************************************************************************/
+
+struct nv_dma_v0 {
+ __u8 version;
+#define NV_DMA_V0_TARGET_VM 0x00
+#define NV_DMA_V0_TARGET_VRAM 0x01
+#define NV_DMA_V0_TARGET_PCI 0x02
+#define NV_DMA_V0_TARGET_PCI_US 0x03
+#define NV_DMA_V0_TARGET_AGP 0x04
+ __u8 target;
+#define NV_DMA_V0_ACCESS_VM 0x00
+#define NV_DMA_V0_ACCESS_RD 0x01
+#define NV_DMA_V0_ACCESS_WR 0x02
+#define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR)
+ __u8 access;
+ __u8 pad03[5];
+ __u64 start;
+ __u64 limit;
+ /* ... chipset-specific class data */
+};
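
This structure is the argument block behind every nvif_object_init(..., NV_DMA_IN_MEMORY, ...) call in this patch. The VRAM ctxdma created in nv50_dmac_create(), for example, boils down to:

	struct nvif_object vram;
	int ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
				   NV_DMA_IN_MEMORY,
				   &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start  = 0,
					.limit  = device->info.ram_user - 1,
				   }, sizeof(struct nv_dma_v0), &vram);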
+
+struct nv50_dma_v0 {
+ __u8 version;
+#define NV50_DMA_V0_PRIV_VM 0x00
+#define NV50_DMA_V0_PRIV_US 0x01
+#define NV50_DMA_V0_PRIV__S 0x02
+ __u8 priv;
+#define NV50_DMA_V0_PART_VM 0x00
+#define NV50_DMA_V0_PART_256 0x01
+#define NV50_DMA_V0_PART_1KB 0x02
+ __u8 part;
+#define NV50_DMA_V0_COMP_NONE 0x00
+#define NV50_DMA_V0_COMP_1 0x01
+#define NV50_DMA_V0_COMP_2 0x02
+#define NV50_DMA_V0_COMP_VM 0x03
+ __u8 comp;
+#define NV50_DMA_V0_KIND_PITCH 0x00
+#define NV50_DMA_V0_KIND_VM 0x7f
+ __u8 kind;
+ __u8 pad05[3];
+};
+
+struct gf100_dma_v0 {
+ __u8 version;
+#define GF100_DMA_V0_PRIV_VM 0x00
+#define GF100_DMA_V0_PRIV_US 0x01
+#define GF100_DMA_V0_PRIV__S 0x02
+ __u8 priv;
+#define GF100_DMA_V0_KIND_PITCH 0x00
+#define GF100_DMA_V0_KIND_VM 0xff
+ __u8 kind;
+ __u8 pad03[5];
+};
+
+struct gf110_dma_v0 {
+ __u8 version;
+#define GF110_DMA_V0_PAGE_LP 0x00
+#define GF110_DMA_V0_PAGE_SP 0x01
+ __u8 page;
+#define GF110_DMA_V0_KIND_PITCH 0x00
+#define GF110_DMA_V0_KIND_VM 0xff
+ __u8 kind;
+ __u8 pad03[5];
+};
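
These three structures are the chipset-specific tails referred to by the trailing comment in nv_dma_v0. nv50_fbdma_init() earlier in this diff shows the mechanism: the tail is appended after the base arguments, and the size handed to nvif_object_init() tells the kernel which (if any) extension is present. Condensed from the fermi-and-newer branch of that function:

	struct {
		struct nv_dma_v0 base;
		struct gf110_dma_v0 gf110;
	} args = {
		.base.target = NV_DMA_V0_TARGET_VRAM,
		.base.access = NV_DMA_V0_ACCESS_RDWR,
		.base.start  = offset,
		.base.limit  = offset + length - 1,
		.gf110.page  = GF110_DMA_V0_PAGE_LP,
		.gf110.kind  = kind,
	};
	u32 size = sizeof(args);	/* base + gf110 tail */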
+
+
+/*******************************************************************************
+ * perfmon
+ ******************************************************************************/
+
+struct nvif_perfctr_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u16 logic_op;
+ __u8 pad04[4];
+ char name[4][64];
+};
+
+#define NVIF_PERFCTR_V0_QUERY 0x00
+#define NVIF_PERFCTR_V0_SAMPLE 0x01
+#define NVIF_PERFCTR_V0_READ 0x02
+
+struct nvif_perfctr_query_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 iter;
+ char name[64];
+};
+
+struct nvif_perfctr_sample {
+};
+
+struct nvif_perfctr_read_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u32 ctr;
+ __u32 clk;
+};
+
+
+/*******************************************************************************
+ * device control
+ ******************************************************************************/
+
+#define NVIF_CONTROL_PSTATE_INFO 0x00
+#define NVIF_CONTROL_PSTATE_ATTR 0x01
+#define NVIF_CONTROL_PSTATE_USER 0x02
+
+struct nvif_control_pstate_info_v0 {
+ __u8 version;
+ __u8 count; /* out: number of power states */
+#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE (-1)
+#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_PERFMON (-2)
+ __s8 ustate_ac; /* out: target pstate index */
+ __s8 ustate_dc; /* out: target pstate index */
+ __s8 pwrsrc; /* out: current power source */
+#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN (-1)
+#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_PERFMON (-2)
+ __s8 pstate; /* out: current pstate index */
+ __u8 pad06[2];
+};
+
+struct nvif_control_pstate_attr_v0 {
+ __u8 version;
+#define NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT (-1)
+ __s8 state; /* in: index of pstate to query
+ * out: pstate identifier
+ */
+ __u8 index; /* in: index of attribute to query
+ * out: index of next attribute, or 0 if no more
+ */
+ __u8 pad03[5];
+ __u32 min;
+ __u32 max;
+ char name[32];
+ char unit[16];
+};
+
+struct nvif_control_pstate_user_v0 {
+ __u8 version;
+#define NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN (-1)
+#define NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON (-2)
+ __s8 ustate; /* in: pstate identifier */
+ __s8 pwrsrc; /* in: target power source */
+ __u8 pad03[5];
+};
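
The attr method above is built to be iterated: the index each call returns is fed straight back in until it reads 0. A sketch of that loop, assuming `ctrl` is a control object created with the NVIF_IOCTL_NEW_V0_CONTROL class from ioctl.h later in this patch, and using nvif_object_mthd() from object.c:

    static void
    example_pstate_attrs(struct nvif_object *ctrl)
    {
            struct nvif_control_pstate_attr_v0 attr = {
                    .state = NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT,
            };

            do {
                    if (nvif_object_mthd(ctrl, NVIF_CONTROL_PSTATE_ATTR,
                                         &attr, sizeof(attr)))
                            break;
                    pr_info("%s: %u..%u %s\n", attr.name, attr.min,
                            attr.max, attr.unit);
            } while (attr.index); /* 0 means no more attributes */
    }
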
+
+
+/*******************************************************************************
+ * DMA FIFO channels
+ ******************************************************************************/
+
+struct nv03_channel_dma_v0 {
+ __u8 version;
+ __u8 chid;
+ __u8 pad02[2];
+ __u32 pushbuf;
+ __u64 offset;
+};
+
+#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
+
+/*******************************************************************************
+ * GPFIFO channels
+ ******************************************************************************/
+
+struct nv50_channel_gpfifo_v0 {
+ __u8 version;
+ __u8 chid;
+ __u8 pad02[6];
+ __u32 pushbuf;
+ __u32 ilength;
+ __u64 ioffset;
+};
+
+struct kepler_channel_gpfifo_a_v0 {
+ __u8 version;
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_VP 0x02
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_PPP 0x04
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_BSP 0x08
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
+ __u8 engine;
+ __u16 chid;
+ __u8 pad04[4];
+ __u32 pushbuf;
+ __u32 ilength;
+ __u64 ioffset;
+};
+
+/*******************************************************************************
+ * legacy display
+ ******************************************************************************/
+
+#define NV04_DISP_NTFY_VBLANK 0x00
+#define NV04_DISP_NTFY_CONN 0x01
+
+struct nv04_disp_mthd_v0 {
+ __u8 version;
+#define NV04_DISP_SCANOUTPOS 0x00
+ __u8 method;
+ __u8 head;
+ __u8 pad03[5];
+};
+
+struct nv04_disp_scanoutpos_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __s64 time[2];
+ __u16 vblanks;
+ __u16 vblanke;
+ __u16 vtotal;
+ __u16 vline;
+ __u16 hblanks;
+ __u16 hblanke;
+ __u16 htotal;
+ __u16 hline;
+};
+
+/*******************************************************************************
+ * display
+ ******************************************************************************/
+
+#define NV50_DISP_MTHD 0x00
+
+struct nv50_disp_mthd_v0 {
+ __u8 version;
+#define NV50_DISP_SCANOUTPOS 0x00
+ __u8 method;
+ __u8 head;
+ __u8 pad03[5];
+};
+
+struct nv50_disp_mthd_v1 {
+ __u8 version;
+#define NV50_DISP_MTHD_V1_DAC_PWR 0x10
+#define NV50_DISP_MTHD_V1_DAC_LOAD 0x11
+#define NV50_DISP_MTHD_V1_SOR_PWR 0x20
+#define NV50_DISP_MTHD_V1_SOR_HDA_ELD 0x21
+#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
+#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
+#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
+#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
+ __u8 method;
+ __u16 hasht;
+ __u16 hashm;
+ __u8 pad06[2];
+};
+
+struct nv50_disp_dac_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 data;
+ __u8 vsync;
+ __u8 hsync;
+ __u8 pad05[3];
+};
+
+struct nv50_disp_dac_load_v0 {
+ __u8 version;
+ __u8 load;
+ __u8 pad02[2];
+ __u32 data;
+};
+
+struct nv50_disp_sor_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_sor_hda_eld_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u8 data[];
+};
+
+struct nv50_disp_sor_hdmi_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 max_ac_packet;
+ __u8 rekey;
+ __u8 pad04[4];
+};
+
+struct nv50_disp_sor_lvds_script_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u16 script;
+ __u8 pad04[4];
+};
+
+struct nv50_disp_sor_dp_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_pior_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 type;
+ __u8 pad03[5];
+};
+
+/* core */
+struct nv50_disp_core_channel_dma_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 pushbuf;
+};
+
+/* cursor immediate */
+struct nv50_disp_cursor_v0 {
+ __u8 version;
+ __u8 head;
+ __u8 pad02[6];
+};
+
+/* base */
+struct nv50_disp_base_channel_dma_v0 {
+ __u8 version;
+ __u8 pad01[2];
+ __u8 head;
+ __u32 pushbuf;
+};
+
+/* overlay */
+struct nv50_disp_overlay_channel_dma_v0 {
+ __u8 version;
+ __u8 pad01[2];
+ __u8 head;
+ __u32 pushbuf;
+};
+
+/* overlay immediate */
+struct nv50_disp_overlay_v0 {
+ __u8 version;
+ __u8 head;
+ __u8 pad02[6];
+};
+
+
+/*******************************************************************************
+ * fermi
+ ******************************************************************************/
+
+#define FERMI_A_ZBC_COLOR 0x00
+#define FERMI_A_ZBC_DEPTH 0x01
+
+struct fermi_a_zbc_color_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01
+#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04
+#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08
+#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c
+#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10
+#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c
+#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30
+#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c
+#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds[4];
+ __u32 l2[4];
+};
+
+struct fermi_a_zbc_depth_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds;
+ __u32 l2;
+};
+
+#endif
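
One concrete request makes the layouts above easier to follow: a chipset-specific class is the generic nv_dma_v0 header immediately followed by its per-family trailer, exactly as the "chipset-specific class data" comment suggests. A minimal sketch for an nv50-family VRAM context dma (all values illustrative):

    struct {
            struct nv_dma_v0 base;
            struct nv50_dma_v0 nv50;
    } args = {
            /* generic context-dma header */
            .base.version = 0,
            .base.target  = NV_DMA_V0_TARGET_VRAM,
            .base.access  = NV_DMA_V0_ACCESS_RDWR,
            .base.start   = 0x0000000000000000ULL,
            .base.limit   = 0x00000000000fffffULL,
            /* per-family trailer appended in base.data[] */
            .nv50.version = 0,
            .nv50.priv    = NV50_DMA_V0_PRIV_VM,
            .nv50.part    = NV50_DMA_V0_PART_256,
            .nv50.comp    = NV50_DMA_V0_COMP_NONE,
            .nv50.kind    = NV50_DMA_V0_KIND_PITCH,
    };
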
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
new file mode 100644
index 000000000000..3c4df1fc26dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "client.h"
+#include "driver.h"
+#include "ioctl.h"
+
+int
+nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
+{
+ return client->driver->ioctl(client->base.priv, client->super, data, size, NULL);
+}
+
+int
+nvif_client_suspend(struct nvif_client *client)
+{
+ return client->driver->suspend(client->base.priv);
+}
+
+int
+nvif_client_resume(struct nvif_client *client)
+{
+ return client->driver->resume(client->base.priv);
+}
+
+void
+nvif_client_fini(struct nvif_client *client)
+{
+ if (client->driver) {
+ client->driver->fini(client->base.priv);
+ client->driver = NULL;
+ client->base.parent = NULL;
+ nvif_object_fini(&client->base);
+ }
+}
+
+const struct nvif_driver *
+nvif_drivers[] = {
+#ifdef __KERNEL__
+ &nvif_driver_nvkm,
+#else
+ &nvif_driver_drm,
+ &nvif_driver_lib,
+#endif
+ NULL
+};
+
+int
+nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver,
+ const char *name, u64 device, const char *cfg, const char *dbg,
+ struct nvif_client *client)
+{
+ int ret, i;
+
+ ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base);
+ if (ret)
+ return ret;
+
+ client->base.parent = &client->base;
+ client->base.handle = ~0;
+ client->object = &client->base;
+ client->super = true;
+
+ for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
+ if (!driver || !strcmp(client->driver->name, driver)) {
+ ret = client->driver->init(name, device, cfg, dbg,
+ &client->base.priv);
+ if (!ret || driver)
+ break;
+ }
+ }
+
+ if (ret)
+ nvif_client_fini(client);
+ return ret;
+}
+
+static void
+nvif_client_del(struct nvif_client *client)
+{
+ nvif_client_fini(client);
+ kfree(client);
+}
+
+int
+nvif_client_new(const char *driver, const char *name, u64 device,
+ const char *cfg, const char *dbg,
+ struct nvif_client **pclient)
+{
+ struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (client) {
+ int ret = nvif_client_init(nvif_client_del, driver, name,
+ device, cfg, dbg, client);
+ if (ret) {
+ kfree(client);
+ client = NULL;
+ }
+ *pclient = client;
+ return ret;
+ }
+ return -ENOMEM;
+}
+
+void
+nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
+{
+ nvif_object_ref(&client->base, (struct nvif_object **)pclient);
+}
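
The intended calling sequence for the functions above, as a hedged sketch; the "nvkm" driver name assumes the kernel backend registers itself under that name, and the device id is illustrative:

    static int
    example_client(void)
    {
            struct nvif_client *client;
            int ret;

            /* a NULL driver name tries each nvif_drivers[] entry in order */
            ret = nvif_client_new("nvkm", "example", 0 /* device id */,
                                  NULL /* cfg */, NULL /* dbg */, &client);
            if (ret)
                    return ret;

            nvif_client_suspend(client);
            nvif_client_resume(client);

            /* the dtor passed by nvif_client_new() is nvif_client_del(), so
             * dropping the last reference also frees the allocation */
            nvif_client_ref(NULL, &client);
            return 0;
    }
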
diff --git a/drivers/gpu/drm/nouveau/nvif/client.h b/drivers/gpu/drm/nouveau/nvif/client.h
new file mode 100644
index 000000000000..28352f0882ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/client.h
@@ -0,0 +1,39 @@
+#ifndef __NVIF_CLIENT_H__
+#define __NVIF_CLIENT_H__
+
+#include "object.h"
+
+struct nvif_client {
+ struct nvif_object base;
+ struct nvif_object *object; /*XXX: hack for nvif_object() */
+ const struct nvif_driver *driver;
+ bool super;
+};
+
+static inline struct nvif_client *
+nvif_client(struct nvif_object *object)
+{
+ while (object && object->parent != object)
+ object = object->parent;
+ return (void *)object;
+}
+
+int nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
+ const char *, u64, const char *, const char *,
+ struct nvif_client *);
+void nvif_client_fini(struct nvif_client *);
+int nvif_client_new(const char *, const char *, u64, const char *,
+ const char *, struct nvif_client **);
+void nvif_client_ref(struct nvif_client *, struct nvif_client **);
+int nvif_client_ioctl(struct nvif_client *, void *, u32);
+int nvif_client_suspend(struct nvif_client *);
+int nvif_client_resume(struct nvif_client *);
+
+/*XXX*/
+#include <core/client.h>
+#define nvkm_client(a) ({ \
+ struct nvif_client *_client = nvif_client(nvif_object(a)); \
+ nouveau_client(_client->base.priv); \
+})
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
new file mode 100644
index 000000000000..f477579725e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "device.h"
+
+void
+nvif_device_fini(struct nvif_device *device)
+{
+ nvif_object_fini(&device->base);
+}
+
+int
+nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
+ u32 handle, u32 oclass, void *data, u32 size,
+ struct nvif_device *device)
+{
+ int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
+ data, size, &device->base);
+ if (ret == 0) {
+ device->object = &device->base;
+ device->info.version = 0;
+ ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
+ &device->info, sizeof(device->info));
+ }
+ return ret;
+}
+
+static void
+nvif_device_del(struct nvif_device *device)
+{
+ nvif_device_fini(device);
+ kfree(device);
+}
+
+int
+nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
+ void *data, u32 size, struct nvif_device **pdevice)
+{
+ struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (device) {
+ int ret = nvif_device_init(parent, nvif_device_del, handle,
+ oclass, data, size, device);
+ if (ret) {
+ kfree(device);
+ device = NULL;
+ }
+ *pdevice = device;
+ return ret;
+ }
+ return -ENOMEM;
+}
+
+void
+nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
+{
+ nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
+}
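
Usage follows the same pattern as the client code, with the info method issued automatically by nvif_device_init(). A sketch, assuming the nv_device_v0 arguments structure defined earlier in class.h; the handle is an arbitrary caller-chosen identifier:

    static int
    example_device(struct nvif_client *client)
    {
            struct nv_device_v0 args = {
                    .device = ~0ULL, /* client's default device */
            };
            struct nvif_device *device;
            int ret;

            /* 0x0080 is the device class nvif_device() in device.h keys on */
            ret = nvif_device_new(&client->base, 0x00000001, 0x0080,
                                  &args, sizeof(args), &device);
            if (ret)
                    return ret;

            pr_info("family 0x%02x, %llu MiB of VRAM\n",
                    device->info.family, device->info.ram_size >> 20);

            nvif_device_ref(NULL, &device);
            return 0;
    }
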
diff --git a/drivers/gpu/drm/nouveau/nvif/device.h b/drivers/gpu/drm/nouveau/nvif/device.h
new file mode 100644
index 000000000000..43180f9fe630
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/device.h
@@ -0,0 +1,62 @@
+#ifndef __NVIF_DEVICE_H__
+#define __NVIF_DEVICE_H__
+
+#include "object.h"
+#include "class.h"
+
+struct nvif_device {
+ struct nvif_object base;
+ struct nvif_object *object; /*XXX: hack for nvif_object() */
+ struct nv_device_info_v0 info;
+};
+
+static inline struct nvif_device *
+nvif_device(struct nvif_object *object)
+{
+ while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
+ object = object->parent;
+ return (void *)object;
+}
+
+int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
+ u32 handle, u32 oclass, void *, u32,
+ struct nvif_device *);
+void nvif_device_fini(struct nvif_device *);
+int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
+ void *, u32, struct nvif_device **);
+void nvif_device_ref(struct nvif_device *, struct nvif_device **);
+
+/*XXX*/
+#include <subdev/bios.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/gpio.h>
+#include <subdev/clock.h>
+#include <subdev/i2c.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+#define nvkm_device(a) nv_device(nvkm_object((a)))
+#define nvkm_bios(a) nouveau_bios(nvkm_device(a))
+#define nvkm_fb(a) nouveau_fb(nvkm_device(a))
+#define nvkm_vmmgr(a) nouveau_vmmgr(nvkm_device(a))
+#define nvkm_bar(a) nouveau_bar(nvkm_device(a))
+#define nvkm_gpio(a) nouveau_gpio(nvkm_device(a))
+#define nvkm_clock(a) nouveau_clock(nvkm_device(a))
+#define nvkm_i2c(a) nouveau_i2c(nvkm_device(a))
+#define nvkm_timer(a) nouveau_timer(nvkm_device(a))
+#define nvkm_wait(a,b,c,d) nv_wait(nvkm_timer(a), (b), (c), (d))
+#define nvkm_wait_cb(a,b,c) nv_wait_cb(nvkm_timer(a), (b), (c))
+#define nvkm_therm(a) nouveau_therm(nvkm_device(a))
+
+#include <engine/device.h>
+#include <engine/fifo.h>
+#include <engine/graph.h>
+#include <engine/software.h>
+
+#define nvkm_fifo(a) nouveau_fifo(nvkm_device(a))
+#define nvkm_fifo_chan(a) ((struct nouveau_fifo_chan *)nvkm_object(a))
+#define nvkm_gr(a) ((struct nouveau_graph *)nouveau_engine(nvkm_object(a), NVDEV_ENGINE_GR))
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
new file mode 100644
index 000000000000..b72a8f0c2758
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_DRIVER_H__
+#define __NVIF_DRIVER_H__
+
+struct nvif_driver {
+ const char *name;
+ int (*init)(const char *name, u64 device, const char *cfg,
+ const char *dbg, void **priv);
+ void (*fini)(void *priv);
+ int (*suspend)(void *priv);
+ int (*resume)(void *priv);
+ int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+ void *(*map)(void *priv, u64 handle, u32 size);
+ void (*unmap)(void *priv, void *ptr, u32 size);
+ bool keep;
+};
+
+extern const struct nvif_driver nvif_driver_nvkm;
+extern const struct nvif_driver nvif_driver_drm;
+extern const struct nvif_driver nvif_driver_lib;
+
+#endif
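
A backend implements this vtable and gets listed in nvif_drivers[] (client.c above). A hypothetical skeleton, not one of the three real backends; map/unmap are left out here, so nvif_object_map() could not be used against it:

    static int
    null_init(const char *name, u64 device, const char *cfg, const char *dbg,
              void **priv)
    {
            *priv = NULL; /* a real backend allocates per-client state here */
            return 0;
    }

    static void null_fini(void *priv) { }
    static int null_suspend(void *priv) { return 0; }
    static int null_resume(void *priv) { return 0; }

    static int
    null_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
    {
            return -ENOSYS; /* a real backend routes the request here */
    }

    const struct nvif_driver nvif_driver_null = {
            .name    = "null",
            .init    = null_init,
            .fini    = null_fini,
            .suspend = null_suspend,
            .resume  = null_resume,
            .ioctl   = null_ioctl,
            .keep    = false,
    };
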
diff --git a/drivers/gpu/drm/nouveau/nvif/event.h b/drivers/gpu/drm/nouveau/nvif/event.h
new file mode 100644
index 000000000000..21764499b4be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/event.h
@@ -0,0 +1,62 @@
+#ifndef __NVIF_EVENT_H__
+#define __NVIF_EVENT_H__
+
+struct nvif_notify_req_v0 {
+ __u8 version;
+ __u8 reply;
+ __u8 pad02[5];
+#define NVIF_NOTIFY_V0_ROUTE_NVIF 0x00
+ __u8 route;
+ __u64 token; /* must be unique */
+ __u8 data[]; /* request data (below) */
+};
+
+struct nvif_notify_rep_v0 {
+ __u8 version;
+ __u8 pad01[6];
+ __u8 route;
+ __u64 token;
+ __u8 data[]; /* reply data (below) */
+};
+
+struct nvif_notify_head_req_v0 {
+ /* nvif_notify_req ... */
+ __u8 version;
+ __u8 head;
+ __u8 pad02[6];
+};
+
+struct nvif_notify_head_rep_v0 {
+ /* nvif_notify_rep ... */
+ __u8 version;
+ __u8 pad01[7];
+};
+
+struct nvif_notify_conn_req_v0 {
+ /* nvif_notify_req ... */
+ __u8 version;
+#define NVIF_NOTIFY_CONN_V0_PLUG 0x01
+#define NVIF_NOTIFY_CONN_V0_UNPLUG 0x02
+#define NVIF_NOTIFY_CONN_V0_IRQ 0x04
+#define NVIF_NOTIFY_CONN_V0_ANY 0x07
+ __u8 mask;
+ __u8 conn;
+ __u8 pad03[5];
+};
+
+struct nvif_notify_conn_rep_v0 {
+ /* nvif_notify_rep ... */
+ __u8 version;
+ __u8 mask;
+ __u8 pad02[6];
+};
+
+struct nvif_notify_uevent_req {
+ /* nvif_notify_req ... */
+};
+
+struct nvif_notify_uevent_rep {
+ /* nvif_notify_rep ... */
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/ioctl.h b/drivers/gpu/drm/nouveau/nvif/ioctl.h
new file mode 100644
index 000000000000..4cd8e323b23d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/ioctl.h
@@ -0,0 +1,128 @@
+#ifndef __NVIF_IOCTL_H__
+#define __NVIF_IOCTL_H__
+
+struct nvif_ioctl_v0 {
+ __u8 version;
+#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
+#define NVIF_IOCTL_V0_OWNER_ANY 0xff
+ __u8 owner;
+#define NVIF_IOCTL_V0_NOP 0x00
+#define NVIF_IOCTL_V0_SCLASS 0x01
+#define NVIF_IOCTL_V0_NEW 0x02
+#define NVIF_IOCTL_V0_DEL 0x03
+#define NVIF_IOCTL_V0_MTHD 0x04
+#define NVIF_IOCTL_V0_RD 0x05
+#define NVIF_IOCTL_V0_WR 0x06
+#define NVIF_IOCTL_V0_MAP 0x07
+#define NVIF_IOCTL_V0_UNMAP 0x08
+#define NVIF_IOCTL_V0_NTFY_NEW 0x09
+#define NVIF_IOCTL_V0_NTFY_DEL 0x0a
+#define NVIF_IOCTL_V0_NTFY_GET 0x0b
+#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
+ __u8 type;
+ __u8 path_nr;
+#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
+#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
+ __u8 pad04[3];
+ __u8 route;
+ __u64 token;
+ __u32 path[8]; /* in reverse */
+ __u8 data[]; /* ioctl data (below) */
+};
+
+struct nvif_ioctl_nop {
+};
+
+struct nvif_ioctl_sclass_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 count;
+ __u8 pad02[6];
+ __u32 oclass[];
+};
+
+struct nvif_ioctl_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[6];
+ __u8 route;
+ __u64 token;
+ __u32 handle;
+/* these class numbers are made up by us, and not nvidia-assigned */
+#define NVIF_IOCTL_NEW_V0_PERFCTR 0x0000ffff
+#define NVIF_IOCTL_NEW_V0_CONTROL 0x0000fffe
+ __u32 oclass;
+ __u8 data[]; /* class data (class.h) */
+};
+
+struct nvif_ioctl_del {
+};
+
+struct nvif_ioctl_rd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_wr_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_map_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[3];
+ __u32 length;
+ __u64 handle;
+};
+
+struct nvif_ioctl_unmap {
+};
+
+struct nvif_ioctl_ntfy_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 event;
+ __u8 index;
+ __u8 pad03[5];
+ __u8 data[]; /* event request data (event.h) */
+};
+
+struct nvif_ioctl_ntfy_del_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_get_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_put_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_mthd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 method;
+ __u8 pad02[6];
+ __u8 data[]; /* method data (class.h) */
+};
+
+#endif
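
Every request is the fixed nvif_ioctl_v0 header with one of the type-specific structures above in its data[] payload; the nvif code later in this patch composes the two as a single anonymous struct. A sketch (the notifier index is illustrative):

    struct {
            struct nvif_ioctl_v0 ioctl;
            struct nvif_ioctl_ntfy_get_v0 ntfy;
    } args = {
            .ioctl.version = 0,
            .ioctl.type = NVIF_IOCTL_V0_NTFY_GET,
            .ntfy.index = 0,
    };

    /* owner and the path[] walk up to the client are filled in by
     * nvif_object_ioctl() (object.c); route/token stay 0 for nvif itself */
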
diff --git a/drivers/gpu/drm/nouveau/nvif/list.h b/drivers/gpu/drm/nouveau/nvif/list.h
new file mode 100644
index 000000000000..8af5d144ecb0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/list.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ * Copyright © 2010 Francisco Jerez <currojerez@riseup.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/* Modified by Ben Skeggs <bskeggs@redhat.com> to match kernel list APIs */
+
+#ifndef _XORG_LIST_H_
+#define _XORG_LIST_H_
+
+/**
+ * @file Classic doubly-linked circular list implementation.
+ * For real usage examples of the linked list, see the file test/list.c
+ *
+ * Example:
+ * We need to keep a list of struct foo in the parent struct bar, i.e. what
+ * we want is something like this.
+ *
+ * struct bar {
+ * ...
+ * struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{}
+ * ...
+ * }
+ *
+ * We need one list head in bar and one list entry in each foo (both are of
+ * data type 'struct list_head').
+ *
+ * struct bar {
+ * ...
+ * struct list_head list_of_foos;
+ * ...
+ * }
+ *
+ * struct foo {
+ * ...
+ * struct list_head entry;
+ * ...
+ * }
+ *
+ * Now we initialize the list head:
+ *
+ * struct bar bar;
+ * ...
+ * INIT_LIST_HEAD(&bar.list_of_foos);
+ *
+ * Then we create the first element and add it to this list:
+ *
+ * struct foo *foo = malloc(...);
+ * ....
+ * list_add(&foo->entry, &bar.list_of_foos);
+ *
+ * Repeat the above for each element you want to add to the list. Deleting
+ * works with the element itself.
+ * list_del(&foo->entry);
+ * free(foo);
+ *
+ * Note: calling list_del(&bar.list_of_foos) will set bar.list_of_foos to an empty
+ * list again.
+ *
+ * Looping through the list requires a 'struct foo' as iterator and the
+ * name of the field the subnodes use.
+ *
+ * struct foo *iterator;
+ * list_for_each_entry(iterator, &bar.list_of_foos, entry) {
+ * if (iterator->something == ...)
+ * ...
+ * }
+ *
+ * Note: You must not call list_del() on the iterator if you continue the
+ * loop. You need to run the safe for-each loop instead:
+ *
+ * struct foo *iterator, *next;
+ * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) {
+ * if (...)
+ * list_del(&iterator->entry);
+ * }
+ *
+ */
+
+/**
+ * The linkage struct for list nodes. This struct must be part of your
+ * to-be-linked struct. struct list_head is required for both the head of the
+ * list and for each list node.
+ *
+ * The position and name of the struct list_head field are irrelevant.
+ * There is no requirement that the elements of a list share the same type,
+ * and any struct list_head can serve as a list head.
+ */
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+/**
+ * Initialize the list as an empty list.
+ *
+ * Example:
+ * INIT_LIST_HEAD(&bar->list_of_foos);
+ *
+ * @param list The list to initialize.
+ */
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void
+INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list->prev = list;
+}
+
+static inline void
+__list_add(struct list_head *entry,
+ struct list_head *prev, struct list_head *next)
+{
+ next->prev = entry;
+ entry->next = next;
+ entry->prev = prev;
+ prev->next = entry;
+}
+
+/**
+ * Insert a new element after the given list head. The new element does not
+ * need to be initialised as an empty list.
+ * The list changes from:
+ * head → some element → ...
+ * to
+ * head → new element → older element → ...
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * list_add(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to prepend to the list.
+ * @param head The existing list.
+ */
+static inline void
+list_add(struct list_head *entry, struct list_head *head)
+{
+ __list_add(entry, head, head->next);
+}
+
+/**
+ * Append a new element to the end of the list given with this list head.
+ *
+ * The list changes from:
+ * head → some element → ... → lastelement
+ * to
+ * head → some element → ... → lastelement → new element
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * list_add_tail(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to append to the list.
+ * @param head The existing list.
+ */
+static inline void
+list_add_tail(struct list_head *entry, struct list_head *head)
+{
+ __list_add(entry, head->prev, head);
+}
+
+static inline void
+__list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * Remove the element from the list it is in. Using this function will reset
+ * the pointers to/from this element so it is removed from the list. It does
+ * NOT free the element itself or otherwise manipulate it.
+ *
+ * Using list_del on a pure list head (like in the example at the top of
+ * this file) will NOT remove the first element from the list, but rather
+ * reset the head to an empty list.
+ *
+ * Example:
+ * list_del(&foo->entry);
+ *
+ * @param entry The element to remove.
+ */
+static inline void
+list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void
+list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
+/**
+ * Check if the list is empty.
+ *
+ * Example:
+ * list_empty(&bar->list_of_foos);
+ *
+ * @return True if the list contains no elements, False otherwise.
+ */
+static inline bool
+list_empty(struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * Returns a pointer to the container of this list element.
+ *
+ * Example:
+ * struct foo* f;
+ * f = container_of(&foo->entry, struct foo, entry);
+ * assert(f == foo);
+ *
+ * @param ptr Pointer to the struct list_head.
+ * @param type Data type of the list element.
+ * @param member Member name of the struct list_head field in the list element.
+ * @return A pointer to the data struct containing the list head.
+ */
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ (type *)((char *)(ptr) - (char *) &((type *)0)->member)
+#endif
+
+/**
+ * Alias of container_of
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * Retrieve the first list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *first;
+ * first = list_first_entry(&bar->list_of_foos, struct foo, entry);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the struct list_head field in the list element.
+ * @return A pointer to the first list element.
+ */
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
+ * Retrieve the last list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *last;
+ * last = list_last_entry(&bar->list_of_foos, struct foo, entry);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the struct list_head field in the list element.
+ * @return A pointer to the last list element.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+#define __container_of(ptr, sample, member) \
+ (void *)container_of((ptr), typeof(*(sample)), member)
+
+/**
+ * Loop through the list given by head, setting pos to each entry in turn.
+ *
+ * Example:
+ * struct foo *iterator;
+ * list_for_each_entry(iterator, &bar->list_of_foos, entry) {
+ * [modify iterator]
+ * }
+ *
+ * This macro is not safe for node deletion. Use list_for_each_entry_safe
+ * instead.
+ *
+ * @param pos Iterator variable of the type of the list elements.
+ * @param head List head
+ * @param member Member name of the struct list_head in the list elements.
+ *
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = __container_of((head)->next, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.next, pos, member))
+
+/**
+ * Loop through the list, keeping a backup pointer to the next element. This
+ * macro allows the current element to be deleted while looping through the
+ * list.
+ *
+ * See list_for_each_entry for more details.
+ */
+#define list_for_each_entry_safe(pos, tmp, head, member) \
+ for (pos = __container_of((head)->next, pos, member), \
+ tmp = __container_of(pos->member.next, pos, member); \
+ &pos->member != (head); \
+ pos = tmp, tmp = __container_of(pos->member.next, tmp, member))
+
+
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = __container_of((head)->prev, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.prev, pos, member))
+
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = __container_of(pos->member.next, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.next, pos, member))
+
+#define list_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = __container_of(pos->member.prev, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.prev, pos, member))
+
+#define list_for_each_entry_from(pos, head, member) \
+ for (; \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.next, pos, member))
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
new file mode 100644
index 000000000000..0898c3155292
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/notify.h>
+#include <nvif/object.h>
+#include <nvif/ioctl.h>
+#include <nvif/event.h>
+
+static inline int
+nvif_notify_put_(struct nvif_notify *notify)
+{
+ struct nvif_object *object = notify->object;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_put_v0 ntfy;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_NTFY_PUT,
+ .ntfy.index = notify->index,
+ };
+
+ if (atomic_inc_return(&notify->putcnt) != 1)
+ return 0;
+
+ return nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_notify_put(struct nvif_notify *notify)
+{
+ if (likely(notify->object) &&
+ test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
+ int ret = nvif_notify_put_(notify);
+ if (test_bit(NVIF_NOTIFY_WORK, &notify->flags))
+ flush_work(&notify->work);
+ return ret;
+ }
+ return 0;
+}
+
+static inline int
+nvif_notify_get_(struct nvif_notify *notify)
+{
+ struct nvif_object *object = notify->object;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_get_v0 ntfy;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_NTFY_GET,
+ .ntfy.index = notify->index,
+ };
+
+ if (atomic_dec_return(&notify->putcnt) != 0)
+ return 0;
+
+ return nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_notify_get(struct nvif_notify *notify)
+{
+ if (likely(notify->object) &&
+ !test_and_set_bit(NVIF_NOTIFY_USER, &notify->flags))
+ return nvif_notify_get_(notify);
+ return 0;
+}
+
+static inline int
+nvif_notify_func(struct nvif_notify *notify, bool keep)
+{
+ int ret = notify->func(notify);
+ if (ret == NVIF_NOTIFY_KEEP ||
+ !test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
+ if (!keep)
+ atomic_dec(&notify->putcnt);
+ else
+ nvif_notify_get_(notify);
+ }
+ return ret;
+}
+
+static void
+nvif_notify_work(struct work_struct *work)
+{
+ struct nvif_notify *notify = container_of(work, typeof(*notify), work);
+ nvif_notify_func(notify, true);
+}
+
+int
+nvif_notify(const void *header, u32 length, const void *data, u32 size)
+{
+ struct nvif_notify *notify = NULL;
+ const union {
+ struct nvif_notify_rep_v0 v0;
+ } *args = header;
+ int ret = NVIF_NOTIFY_DROP;
+
+ if (length == sizeof(args->v0) && args->v0.version == 0) {
+ if (WARN_ON(args->v0.route))
+ return NVIF_NOTIFY_DROP;
+ notify = (void *)(unsigned long)args->v0.token;
+ }
+
+ if (!WARN_ON(notify == NULL)) {
+ struct nvif_client *client = nvif_client(notify->object);
+ if (!WARN_ON(notify->size != size)) {
+ atomic_inc(&notify->putcnt);
+ if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
+ memcpy((void *)notify->data, data, size);
+ schedule_work(&notify->work);
+ return NVIF_NOTIFY_DROP;
+ }
+ notify->data = data;
+ ret = nvif_notify_func(notify, client->driver->keep);
+ notify->data = NULL;
+ }
+ }
+
+ return ret;
+}
+
+int
+nvif_notify_fini(struct nvif_notify *notify)
+{
+ struct nvif_object *object = notify->object;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_del_v0 ntfy;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_NTFY_DEL,
+ .ntfy.index = notify->index,
+ };
+ int ret = nvif_notify_put(notify);
+ if (ret >= 0 && object) {
+ ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret == 0) {
+ nvif_object_ref(NULL, &notify->object);
+ kfree((void *)notify->data);
+ }
+ }
+ return ret;
+}
+
+int
+nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
+ int (*func)(struct nvif_notify *), bool work, u8 event,
+ void *data, u32 size, u32 reply, struct nvif_notify *notify)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_new_v0 ntfy;
+ struct nvif_notify_req_v0 req;
+ } *args;
+ int ret = -ENOMEM;
+
+ notify->object = NULL;
+ nvif_object_ref(object, &notify->object);
+ notify->flags = 0;
+ atomic_set(&notify->putcnt, 1);
+ notify->dtor = dtor;
+ notify->func = func;
+ notify->data = NULL;
+ notify->size = reply;
+ if (work) {
+ INIT_WORK(&notify->work, nvif_notify_work);
+ set_bit(NVIF_NOTIFY_WORK, &notify->flags);
+ notify->data = kmalloc(notify->size, GFP_KERNEL);
+ if (!notify->data)
+ goto done;
+ }
+
+ if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ goto done;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW;
+ args->ntfy.version = 0;
+ args->ntfy.event = event;
+ args->req.version = 0;
+ args->req.reply = notify->size;
+ args->req.route = 0;
+ args->req.token = (unsigned long)(void *)notify;
+
+ memcpy(args->req.data, data, size);
+ ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ notify->index = args->ntfy.index;
+ kfree(args);
+done:
+ if (ret)
+ nvif_notify_fini(notify);
+ return ret;
+}
+
+static void
+nvif_notify_del(struct nvif_notify *notify)
+{
+ nvif_notify_fini(notify);
+ kfree(notify);
+}
+
+void
+nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
+{
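+ /* notify objects aren't reference counted; only dropping is supported,
+ * i.e. notify must be NULL, and any existing *pnotify is destroyed */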
+ BUG_ON(notify != NULL);
+ if (*pnotify)
+ (*pnotify)->dtor(*pnotify);
+ *pnotify = notify;
+}
+
+int
+nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
+ bool work, u8 type, void *data, u32 size, u32 reply,
+ struct nvif_notify **pnotify)
+{
+ struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
+ if (notify) {
+ int ret = nvif_notify_init(object, nvif_notify_del, func, work,
+ type, data, size, reply, notify);
+ if (ret) {
+ kfree(notify);
+ notify = NULL;
+ }
+ *pnotify = notify;
+ return ret;
+ }
+ return -ENOMEM;
+}
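
Tying notify.c to the request/reply structures in event.h, a connector hotplug notifier might be set up roughly as below. `disp` stands in for whatever display object the notifier hangs off; the connector index and mask are illustrative:

    static int
    hpd_notify(struct nvif_notify *notify)
    {
            const struct nvif_notify_conn_rep_v0 *rep = notify->data;

            if (rep->mask & NVIF_NOTIFY_CONN_V0_PLUG)
                    pr_info("connector plugged\n");
            return NVIF_NOTIFY_KEEP; /* re-arm after delivery */
    }

    static int
    example_hpd(struct nvif_object *disp)
    {
            struct nvif_notify_conn_req_v0 req = {
                    .mask = NVIF_NOTIFY_CONN_V0_PLUG |
                            NVIF_NOTIFY_CONN_V0_UNPLUG,
                    .conn = 0,
            };
            struct nvif_notify *notify;
            int ret;

            ret = nvif_notify_new(disp, hpd_notify,
                                  true /* deliver from a workqueue */,
                                  NV04_DISP_NTFY_CONN, &req, sizeof(req),
                                  sizeof(struct nvif_notify_conn_rep_v0),
                                  &notify);
            if (ret == 0)
                    ret = nvif_notify_get(notify); /* arm it */
            return ret;
    }
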
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.h b/drivers/gpu/drm/nouveau/nvif/notify.h
new file mode 100644
index 000000000000..9ebfa3b45e76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/notify.h
@@ -0,0 +1,39 @@
+#ifndef __NVIF_NOTIFY_H__
+#define __NVIF_NOTIFY_H__
+
+struct nvif_notify {
+ struct nvif_object *object;
+ int index;
+
+#define NVIF_NOTIFY_USER 0
+#define NVIF_NOTIFY_WORK 1
+ unsigned long flags;
+ atomic_t putcnt;
+ void (*dtor)(struct nvif_notify *);
+#define NVIF_NOTIFY_DROP 0
+#define NVIF_NOTIFY_KEEP 1
+ int (*func)(struct nvif_notify *);
+
+ /* this is const for a *very* good reason - the data might be on the
+ * stack from an irq handler. if you're not nvif/notify.c then you
+ * should probably think twice before casting it away...
+ */
+ const void *data;
+ u32 size;
+ struct work_struct work;
+};
+
+int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *),
+ int (*func)(struct nvif_notify *), bool work, u8 type,
+ void *data, u32 size, u32 reply, struct nvif_notify *);
+int nvif_notify_fini(struct nvif_notify *);
+int nvif_notify_get(struct nvif_notify *);
+int nvif_notify_put(struct nvif_notify *);
+int nvif_notify(const void *, u32, const void *, u32);
+
+int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
+ bool work, u8 type, void *data, u32 size, u32 reply,
+ struct nvif_notify **);
+void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
new file mode 100644
index 000000000000..dd85b56f6aa5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "object.h"
+#include "client.h"
+#include "driver.h"
+#include "ioctl.h"
+
+int
+nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
+{
+ struct nvif_client *client = nvif_client(object);
+ union {
+ struct nvif_ioctl_v0 v0;
+ } *args = data;
+
+ if (size >= sizeof(*args) && args->v0.version == 0) {
+ args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
+ args->v0.path_nr = 0;
+ while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
+ args->v0.path[args->v0.path_nr++] = object->handle;
+ if (object->parent == object)
+ break;
+ object = object->parent;
+ }
+ } else
+ return -ENOSYS;
+
+ return client->driver->ioctl(client->base.priv, client->super, data, size, hack);
+}
+
+int
+nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_sclass_v0 sclass;
+ } *args;
+ u32 size = count * sizeof(args->sclass.oclass[0]);
+ int ret;
+
+ if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ return -ENOMEM;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
+ args->sclass.version = 0;
+ args->sclass.count = count;
+
+ memcpy(args->sclass.oclass, oclass, size);
+ ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ ret = ret ? ret : args->sclass.count;
+ memcpy(oclass, args->sclass.oclass, size);
+ kfree(args);
+ return ret;
+}
+
+u32
+nvif_object_rd(struct nvif_object *object, int size, u64 addr)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_rd_v0 rd;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_RD,
+ .rd.size = size,
+ .rd.addr = addr,
+ };
+ int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret) {
+ /*XXX: warn? */
+ return 0;
+ }
+ return args.rd.data;
+}
+
+void
+nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_wr_v0 wr;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_WR,
+ .wr.size = size,
+ .wr.addr = addr,
+ .wr.data = data,
+ };
+ int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret) {
+ /*XXX: warn? */
+ }
+}
+
+int
+nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_mthd_v0 mthd;
+ } *args;
+ u8 stack[128];
+ int ret;
+
+ if (sizeof(*args) + size > sizeof(stack)) {
+ if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ return -ENOMEM;
+ } else {
+ args = (void *)stack;
+ }
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_MTHD;
+ args->mthd.version = 0;
+ args->mthd.method = mthd;
+
+ memcpy(args->mthd.data, data, size);
+ ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ memcpy(data, args->mthd.data, size);
+ if (args != (void *)stack)
+ kfree(args);
+ return ret;
+}
+
+void
+nvif_object_unmap(struct nvif_object *object)
+{
+ if (object->map.size) {
+ struct nvif_client *client = nvif_client(object);
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_unmap unmap;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_UNMAP,
+ };
+
+ if (object->map.ptr) {
+ client->driver->unmap(client, object->map.ptr,
+ object->map.size);
+ object->map.ptr = NULL;
+ }
+
+ nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ object->map.size = 0;
+ }
+}
+
+int
+nvif_object_map(struct nvif_object *object)
+{
+ struct nvif_client *client = nvif_client(object);
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_map_v0 map;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_MAP,
+ };
+ int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret == 0) {
+ object->map.size = args.map.length;
+ object->map.ptr = client->driver->map(client, args.map.handle,
+ object->map.size);
+ if (object->map.ptr)
+ return 0;
+ ret = -ENOMEM;
+ nvif_object_unmap(object);
+ }
+ return ret;
+}
+
+struct ctor {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+};
+
+void
+nvif_object_fini(struct nvif_object *object)
+{
+ struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
+ if (object->parent) {
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_del del;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_DEL,
+ };
+
+ nvif_object_unmap(object);
+ nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (object->data) {
+ object->size = 0;
+ object->data = NULL;
+ kfree(ctor);
+ }
+ nvif_object_ref(NULL, &object->parent);
+ }
+}
+
+int
+nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
+ u32 handle, u32 oclass, void *data, u32 size,
+ struct nvif_object *object)
+{
+ struct ctor *ctor;
+ int ret = 0;
+
+ object->parent = NULL;
+ object->object = object;
+ nvif_object_ref(parent, &object->parent);
+ kref_init(&object->refcount);
+ object->handle = handle;
+ object->oclass = oclass;
+ object->data = NULL;
+ object->size = 0;
+ object->dtor = dtor;
+ object->map.ptr = NULL;
+ object->map.size = 0;
+
+ if (object->parent) {
+ if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
+ nvif_object_fini(object);
+ return -ENOMEM;
+ }
+ object->data = ctor->new.data;
+ object->size = size;
+ memcpy(object->data, data, size);
+
+ ctor->ioctl.version = 0;
+ ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
+ ctor->new.version = 0;
+ ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
+ ctor->new.token = (unsigned long)(void *)object;
+ ctor->new.handle = handle;
+ ctor->new.oclass = oclass;
+
+ ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
+ object->size, &object->priv);
+ }
+
+ if (ret)
+ nvif_object_fini(object);
+ return ret;
+}
+
+static void
+nvif_object_del(struct nvif_object *object)
+{
+ nvif_object_fini(object);
+ kfree(object);
+}
+
+int
+nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
+ void *data, u32 size, struct nvif_object **pobject)
+{
+ struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (object) {
+ int ret = nvif_object_init(parent, nvif_object_del, handle,
+ oclass, data, size, object);
+ if (ret) {
+ kfree(object);
+ object = NULL;
+ }
+ *pobject = object;
+ return ret;
+ }
+ return -ENOMEM;
+}
+
+static void
+nvif_object_put(struct kref *kref)
+{
+ struct nvif_object *object =
+ container_of(kref, typeof(*object), refcount);
+ object->dtor(object);
+}
+
+void
+nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
+{
+ if (object)
+ kref_get(&object->refcount);
+ if (*pobject)
+ kref_put(&(*pobject)->refcount, nvif_object_put);
+ *pobject = object;
+}
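
A consumer sketch combining nvif_object_new() with a method call; the perfmon counter class from class.h/ioctl.h is used purely as an example, and the handle, logic op and signal name are illustrative:

    static int
    example_perfctr(struct nvif_device *device)
    {
            struct nvif_perfctr_v0 args = {
                    .logic_op = 0xaaaa,
                    .name = { "gr_idle" },
            };
            struct nvif_perfctr_read_v0 read = {};
            struct nvif_object *ctr;
            int ret;

            ret = nvif_object_new(&device->base, 0xc0000000 /* handle */,
                                  NVIF_IOCTL_NEW_V0_PERFCTR,
                                  &args, sizeof(args), &ctr);
            if (ret)
                    return ret;

            /* latch the counters, then read the latched values back */
            nvif_object_mthd(ctr, NVIF_PERFCTR_V0_SAMPLE, NULL, 0);
            ret = nvif_object_mthd(ctr, NVIF_PERFCTR_V0_READ,
                                   &read, sizeof(read));
            if (ret == 0)
                    pr_info("ctr %u over %u clocks\n", read.ctr, read.clk);

            nvif_object_ref(NULL, &ctr); /* drop: dtor is nvif_object_del() */
            return ret;
    }
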
diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/nvif/object.h
new file mode 100644
index 000000000000..fac3a3bbec44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/object.h
@@ -0,0 +1,75 @@
+#ifndef __NVIF_OBJECT_H__
+#define __NVIF_OBJECT_H__
+
+#include <nvif/os.h>
+
+struct nvif_object {
+ struct nvif_object *parent;
+ struct nvif_object *object; /*XXX: hack for nvif_object() */
+ struct kref refcount;
+ u32 handle;
+ u32 oclass;
+ void *data;
+ u32 size;
+ void *priv; /*XXX: hack */
+ void (*dtor)(struct nvif_object *);
+ struct {
+ void *ptr;
+ u32 size;
+ } map;
+};
+
+int nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *),
+ u32 handle, u32 oclass, void *, u32,
+ struct nvif_object *);
+void nvif_object_fini(struct nvif_object *);
+int nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
+ void *, u32, struct nvif_object **);
+void nvif_object_ref(struct nvif_object *, struct nvif_object **);
+int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
+int nvif_object_sclass(struct nvif_object *, u32 *, int);
+u32 nvif_object_rd(struct nvif_object *, int, u64);
+void nvif_object_wr(struct nvif_object *, int, u64, u32);
+int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
+int nvif_object_map(struct nvif_object *);
+void nvif_object_unmap(struct nvif_object *);
+
+#define nvif_object(a) (a)->object
+
+#define ioread8_native ioread8
+#define iowrite8_native iowrite8
+#define nvif_rd(a,b,c) ({ \
+ struct nvif_object *_object = nvif_object(a); \
+ u32 _data; \
+ if (likely(_object->map.ptr)) \
+ _data = ioread##b##_native((u8 *)_object->map.ptr + (c)); \
+ else \
+ _data = nvif_object_rd(_object, (b) / 8, (c)); \
+ _data; \
+})
+#define nvif_wr(a,b,c,d) ({ \
+ struct nvif_object *_object = nvif_object(a); \
+ if (likely(_object->map.ptr)) \
+ iowrite##b##_native((d), (u8 *)_object->map.ptr + (c)); \
+ else \
+ nvif_object_wr(_object, (b) / 8, (c), (d)); \
+})
+#define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; })
+#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; })
+#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; })
+#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c))
+#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c))
+#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c))
+#define nvif_mask(a,b,c,d) ({ \
+ u32 _v = nvif_rd32(nvif_object(a), (b)); \
+ nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d)); \
+ _v; \
+})
+
+#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d))
+
+/*XXX*/
+#include <core/object.h>
+#define nvkm_object(a) ((struct nouveau_object *)nvif_object(a)->priv)
+
+#endif
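
A sketch of the accessor macros in use; the register offsets are illustrative. Without a prior nvif_object_map() every access falls back to the RD/WR ioctl path:

    static void
    example_regs(struct nvif_object *object)
    {
            u32 v;

            nvif_object_map(object); /* optional: enables the mmio fast path */

            v = nvif_rd32(object, 0x000100);
            nvif_wr32(object, 0x000100, v | 0x00000001);
            nvif_mask(object, 0x000200, 0x00000007, 0x00000002);

            nvif_object_unmap(object);
    }
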
diff --git a/drivers/gpu/drm/nouveau/nvif/os.h b/drivers/gpu/drm/nouveau/nvif/os.h
new file mode 120000
index 000000000000..bd744b2cf5cf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/os.h
@@ -0,0 +1 @@
+../core/os.h
\ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/nvif/unpack.h b/drivers/gpu/drm/nouveau/nvif/unpack.h
new file mode 100644
index 000000000000..5933188b4a77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/unpack.h
@@ -0,0 +1,24 @@
+#ifndef __NVIF_UNPACK_H__
+#define __NVIF_UNPACK_H__
+
+#define nvif_unvers(d) ({ \
+ ret = (size == sizeof(d)) ? 0 : -ENOSYS; \
+ (ret == 0); \
+})
+
+#define nvif_unpack(d,vl,vh,m) ({ \
+ if ((vl) == 0 || ret == -ENOSYS) { \
+ int _size = sizeof(d); \
+ if (_size <= size && (d).version >= (vl) && \
+ (d).version <= (vh)) { \
+ data = (u8 *)data + _size; \
+ size = size - _size; \
+ ret = ((m) || !size) ? 0 : -E2BIG; \
+ } else { \
+ ret = -ENOSYS; \
+ } \
+ } \
+ (ret == 0); \
+})
+
+#endif
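
The macros deliberately pick up `ret`, `data` and `size` from the calling scope, advancing data/size as versions match. The canonical handler shape, using a display struct from class.h as a stand-in:

    static int
    example_mthd(void *data, u32 size)
    {
            union {
                    struct nv50_disp_dac_pwr_v0 v0;
            } *args = data;
            int ret;

            if (nvif_unpack(args->v0, 0, 0, false)) {
                    /* a version in [0,0] matched and, with m == false, the
                     * buffer was fully consumed: act on args->v0 here */
                    return 0;
            }
            return ret; /* -ENOSYS: no version match; -E2BIG: trailing data */
    }
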
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 36bc5cc80816..a94b11f7859d 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -32,8 +32,16 @@ struct omap_connector {
struct drm_connector base;
struct omap_dss_device *dssdev;
struct drm_encoder *encoder;
+ bool hdmi_mode;
};
+bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ return omap_connector->hdmi_mode;
+}
+
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings)
{
@@ -162,10 +170,14 @@ static int omap_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(
connector, edid);
n = drm_add_edid_modes(connector, edid);
+
+ omap_connector->hdmi_mode =
+ drm_detect_hdmi_monitor(edid);
} else {
drm_mode_connector_update_edid_property(
connector, NULL);
}
+
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 002b9721e85a..862ba03c236c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -629,6 +629,7 @@ static struct drm_driver omap_drm_driver = {
.lastclose = dev_lastclose,
.preclose = dev_preclose,
.postclose = dev_postclose,
+ .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = omap_irq_enable_vblank,
.disable_vblank = omap_irq_disable_vblank,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index b08a450d1b5d..84d73a61b34b 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -187,6 +187,7 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h);
+bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 5290a88c681d..7445fb1491ae 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -17,6 +17,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_edid.h>
+
#include "omap_drv.h"
#include "drm_crtc.h"
@@ -89,6 +91,31 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct drm_connector *connector;
+ bool hdmi_mode;
+ int r;
+
+ hdmi_mode = false;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
+ break;
+ }
+ }
+
+ if (dssdev->driver->set_hdmi_mode)
+ dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode);
+
+ if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) {
+ struct hdmi_avi_infoframe avi;
+
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode);
+ if (r == 0)
+ dssdev->driver->set_hdmi_infoframe(dssdev, &avi);
+ }
}
static void omap_encoder_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 4fcca8d42796..a2dbfb1737b4 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -171,7 +171,7 @@ static struct dma_buf_ops omap_dmabuf_ops = {
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
+ return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL);
}
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 4ec874da5668..bee9f72b3a93 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -5,7 +5,7 @@ config DRM_PANEL
Panel registration and lookup framework.
menu "Display Panels"
- depends on DRM_PANEL
+ depends on DRM && DRM_PANEL
config DRM_PANEL_SIMPLE
tristate "support for simple panels"
@@ -18,14 +18,11 @@ config DRM_PANEL_SIMPLE
config DRM_PANEL_LD9040
tristate "LD9040 RGB/SPI panel"
- depends on DRM && DRM_PANEL
- depends on OF
- select SPI
+ depends on OF && SPI
select VIDEOMODE_HELPERS
config DRM_PANEL_S6E8AA0
tristate "S6E8AA0 DSI video mode panel"
- depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index db1601fdbe29..42ac67b21e9f 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -110,7 +110,10 @@ struct ld9040 {
int error;
};
-#define panel_to_ld9040(p) container_of(p, struct ld9040, panel)
+static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
+{
+ return container_of(panel, struct ld9040, panel);
+}
static int ld9040_clear_error(struct ld9040 *ctx)
{
@@ -216,6 +219,11 @@ static int ld9040_power_off(struct ld9040 *ctx)
static int ld9040_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int ld9040_unprepare(struct drm_panel *panel)
+{
struct ld9040 *ctx = panel_to_ld9040(panel);
msleep(120);
@@ -228,7 +236,7 @@ static int ld9040_disable(struct drm_panel *panel)
return ld9040_power_off(ctx);
}
-static int ld9040_enable(struct drm_panel *panel)
+static int ld9040_prepare(struct drm_panel *panel)
{
struct ld9040 *ctx = panel_to_ld9040(panel);
int ret;
@@ -242,11 +250,16 @@ static int ld9040_enable(struct drm_panel *panel)
ret = ld9040_clear_error(ctx);
if (ret < 0)
- ld9040_disable(panel);
+ ld9040_unprepare(panel);
return ret;
}
+static int ld9040_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int ld9040_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -273,6 +286,8 @@ static int ld9040_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs ld9040_drm_funcs = {
.disable = ld9040_disable,
+ .unprepare = ld9040_unprepare,
+ .prepare = ld9040_prepare,
.enable = ld9040_enable,
.get_modes = ld9040_get_modes,
};
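Splitting enable into prepare/enable (and disable into disable/unprepare) lets the display driver bring the video bus up between the two power steps, which DSI panels like this one need. A minimal sketch of the expected call order, assuming the drm_panel wrapper helpers of this era (error handling elided):

    #include <drm/drm_panel.h>

    static void example_output_on(struct drm_panel *panel)
    {
            drm_panel_prepare(panel);   /* .prepare: power up, exit sleep */
            /* ... start the DSI/RGB video stream here ... */
            drm_panel_enable(panel);    /* .enable: backlight on, show frame */
    }

    static void example_output_off(struct drm_panel *panel)
    {
            drm_panel_disable(panel);   /* .disable: backlight off */
            /* ... stop the video stream here ... */
            drm_panel_unprepare(panel); /* .unprepare: sleep in, power down */
    }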
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 06e57a26db7a..b5217fe37f02 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -120,7 +120,10 @@ struct s6e8aa0 {
int error;
};
-#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel)
+static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e8aa0, panel);
+}
static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
{
@@ -133,14 +136,14 @@ static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ ssize_t ret;
if (ctx->error < 0)
return;
- ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len);
+ ret = mipi_dsi_dcs_write(dsi, data, len);
if (ret < 0) {
- dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
+ dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
data);
ctx->error = ret;
}
@@ -154,7 +157,7 @@ static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
if (ctx->error < 0)
return ctx->error;
- ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len);
+ ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
if (ret < 0) {
dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
ctx->error = ret;
@@ -889,6 +892,11 @@ static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
static int s6e8aa0_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int s6e8aa0_unprepare(struct drm_panel *panel)
+{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
@@ -900,7 +908,7 @@ static int s6e8aa0_disable(struct drm_panel *panel)
return s6e8aa0_power_off(ctx);
}
-static int s6e8aa0_enable(struct drm_panel *panel)
+static int s6e8aa0_prepare(struct drm_panel *panel)
{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
int ret;
@@ -913,11 +921,16 @@ static int s6e8aa0_enable(struct drm_panel *panel)
ret = ctx->error;
if (ret < 0)
- s6e8aa0_disable(panel);
+ s6e8aa0_unprepare(panel);
return ret;
}
+static int s6e8aa0_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int s6e8aa0_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -944,6 +957,8 @@ static int s6e8aa0_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
.disable = s6e8aa0_disable,
+ .unprepare = s6e8aa0_unprepare,
+ .prepare = s6e8aa0_prepare,
.enable = s6e8aa0_enable,
.get_modes = s6e8aa0_get_modes,
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a25136132c31..4ce1db0a68ff 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -37,14 +37,35 @@ struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
+ unsigned int bpc;
+
struct {
unsigned int width;
unsigned int height;
} size;
+
+ /**
+ * @prepare: the time (in milliseconds) that it takes for the panel to
+ * become ready and start receiving video data
+ * @enable: the time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data
+ * @disable: the time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible)
+ * @unprepare: the time (in milliseconds) that it takes for the panel
+ * to power itself down completely
+ */
+ struct {
+ unsigned int prepare;
+ unsigned int enable;
+ unsigned int disable;
+ unsigned int unprepare;
+ } delay;
};
struct panel_simple {
struct drm_panel base;
+ bool prepared;
bool enabled;
const struct panel_desc *desc;
@@ -87,6 +108,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
connector->display_info.height_mm = panel->desc->size.height;
@@ -105,21 +127,40 @@ static int panel_simple_disable(struct drm_panel *panel)
backlight_update_status(p->backlight);
}
+ if (p->desc->delay.disable)
+ msleep(p->desc->delay.disable);
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_simple_unprepare(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (!p->prepared)
+ return 0;
+
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->enabled = false;
+
+ if (p->desc->delay.unprepare)
+ msleep(p->desc->delay.unprepare);
+
+ p->prepared = false;
return 0;
}
-static int panel_simple_enable(struct drm_panel *panel)
+static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
int err;
- if (p->enabled)
+ if (p->prepared)
return 0;
err = regulator_enable(p->supply);
@@ -131,6 +172,24 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 1);
+ if (p->desc->delay.prepare)
+ msleep(p->desc->delay.prepare);
+
+ p->prepared = true;
+
+ return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (p->enabled)
+ return 0;
+
+ if (p->desc->delay.enable)
+ msleep(p->desc->delay.enable);
+
if (p->backlight) {
p->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(p->backlight);
@@ -164,6 +223,8 @@ static int panel_simple_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
+ .unprepare = panel_simple_unprepare,
+ .prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
};
@@ -179,22 +240,21 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
+ panel->prepared = false;
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
- panel->enable_gpio = devm_gpiod_get(dev, "enable");
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
- if (err != -ENOENT) {
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ dev_err(dev, "failed to request GPIO: %d\n", err);
+ return err;
+ }
- panel->enable_gpio = NULL;
- } else {
+ if (panel->enable_gpio) {
err = gpiod_direction_output(panel->enable_gpio, 0);
if (err < 0) {
dev_err(dev, "failed to setup GPIO: %d\n", err);
@@ -285,6 +345,7 @@ static const struct drm_display_mode auo_b101aw03_mode = {
static const struct panel_desc auo_b101aw03 = {
.modes = &auo_b101aw03_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -307,12 +368,40 @@ static const struct drm_display_mode auo_b133xtn01_mode = {
static const struct panel_desc auo_b133xtn01 = {
.modes = &auo_b133xtn01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 293,
.height = 165,
},
};
+static const struct drm_display_mode auo_b133htn01_mode = {
+ .clock = 150660,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 172,
+ .hsync_end = 1920 + 172 + 80,
+ .htotal = 1920 + 172 + 80 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 25,
+ .vsync_end = 1080 + 25 + 10,
+ .vtotal = 1080 + 25 + 10 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b133htn01 = {
+ .modes = &auo_b133htn01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .prepare = 105,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
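As a sanity check on the new mode: htotal = 1920 + 172 + 80 + 60 = 2232 and vtotal = 1080 + 25 + 10 + 10 = 1125, so the pixel clock gives 150,660,000 / (2232 * 1125) = 60.0 Hz, matching the .vrefresh field exactly.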
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -329,6 +418,7 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
static const struct panel_desc chunghwa_claa101wa01a = {
.modes = &chunghwa_claa101wa01a_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 220,
.height = 120,
@@ -351,6 +441,7 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = {
static const struct panel_desc chunghwa_claa101wb01 = {
.modes = &chunghwa_claa101wb01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -374,6 +465,7 @@ static const struct drm_display_mode edt_et057090dhu_mode = {
static const struct panel_desc edt_et057090dhu = {
.modes = &edt_et057090dhu_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 115,
.height = 86,
@@ -397,12 +489,82 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = {
static const struct panel_desc edt_etm0700g0dh6 = {
.modes = &edt_etm0700g0dh6_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 152,
.height = 91,
},
};
+static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
+ .clock = 32260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 168,
+ .hsync_end = 800 + 168 + 64,
+ .htotal = 800 + 168 + 64 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 37,
+ .vsync_end = 480 + 37 + 2,
+ .vtotal = 480 + 37 + 2 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc foxlink_fl500wvr00_a0t = {
+ .modes = &foxlink_fl500wvr00_a0t_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+ .clock = 71000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 6,
+ .htotal = 1366 + 64 + 6 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 8,
+ .vsync_end = 768 + 8 + 4,
+ .vtotal = 768 + 8 + 4 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_n116bge = {
+ .modes = &innolux_n116bge_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
+static const struct drm_display_mode innolux_n156bge_l21_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 16,
+ .hsync_end = 1366 + 16 + 34,
+ .htotal = 1366 + 16 + 34 + 50,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 6,
+ .vtotal = 768 + 2 + 6 + 12,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_n156bge_l21 = {
+ .modes = &innolux_n156bge_l21_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 344,
+ .height = 193,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -419,6 +581,7 @@ static const struct drm_display_mode lg_lp129qe_mode = {
static const struct panel_desc lg_lp129qe = {
.modes = &lg_lp129qe_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 272,
.height = 181,
@@ -441,6 +604,7 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = {
static const struct panel_desc samsung_ltn101nt05 = {
.modes = &samsung_ltn101nt05_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 1024,
.height = 600,
@@ -452,6 +616,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b133htn01",
+ .data = &auo_b133htn01,
+ }, {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
@@ -470,14 +637,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0dh6",
.data = &edt_etm0700g0dh6,
}, {
+ .compatible = "foxlink,fl500wvr00-a0t",
+ .data = &foxlink_fl500wvr00_a0t,
+ }, {
+ .compatible = "innolux,n116bge",
+ .data = &innolux_n116bge,
+ }, {
+ .compatible = "innolux,n156bge-l21",
+ .data = &innolux_n156bge_l21,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
- .compatible = "simple-panel",
- }, {
/* sentinel */
}
};
@@ -545,7 +719,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.height = 151,
},
},
- .flags = MIPI_DSI_MODE_VIDEO,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
@@ -599,7 +773,8 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.height = 136,
},
},
- .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
index ea046ba691d2..bacc4aff1201 100644
--- a/drivers/gpu/drm/qxl/Makefile
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
-qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o
obj-$(CONFIG_DRM_QXL)+= qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index eb89653a7a17..97823644d347 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -620,17 +620,10 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
if (ret == -EBUSY)
return -EBUSY;
- if (surf->fence.num_active_releases > 0 && stall == false) {
- qxl_bo_unreserve(surf);
- return -EBUSY;
- }
-
if (stall)
mutex_unlock(&qdev->surf_evict_mutex);
- spin_lock(&surf->tbo.bdev->fence_lock);
ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
- spin_unlock(&surf->tbo.bdev->fence_lock);
if (stall)
mutex_lock(&qdev->surf_evict_mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index c3c2bbdc6674..6911b8c44492 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -58,9 +58,17 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
- seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
- (unsigned long)bo->gem_base.size, bo->pin_count,
- bo->tbo.sync_obj, bo->fence.num_active_releases);
+ struct reservation_object_list *fobj;
+ int rel;
+
+ rcu_read_lock();
+ fobj = rcu_dereference(bo->tbo.resv->fence);
+ rel = fobj ? fobj->shared_count : 0;
+ rcu_read_unlock();
+
+ seq_printf(m, "size %ld, pc %d, num releases %d\n",
+ (unsigned long)bo->gem_base.size,
+ bo->pin_count, rel);
}
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index b8ced08b6291..af9e78546688 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -187,6 +187,54 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
kfree(qxl_crtc);
}
+static int qxl_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct qxl_device *qdev = dev->dev_private;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+ struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
+ struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
+ struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
+ struct qxl_bo *bo = gem_to_qxl_bo(qfb_src->obj);
+ unsigned long flags;
+ struct drm_clip_rect norect = {
+ .x1 = 0,
+ .y1 = 0,
+ .x2 = fb->width,
+ .y2 = fb->height
+ };
+ int inc = 1;
+ int one_clip_rect = 1;
+ int ret = 0;
+
+ crtc->primary->fb = fb;
+ bo_old->is_primary = false;
+ bo->is_primary = true;
+
+ ret = qxl_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
+ &norect, one_clip_rect, inc);
+
+ drm_vblank_get(dev, qcrtc->index);
+
+ if (event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_send_vblank_event(dev, qcrtc->index, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+ drm_vblank_put(dev, qcrtc->index);
+
+ qxl_bo_unreserve(bo);
+
+ return 0;
+}
+
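qxl has no real scanout vblank, so the flip completes synchronously: the dirty rectangle is drawn and the pageflip event is delivered straight away. From userspace the sequence is the usual libdrm one; a hedged sketch (fd, crtc_id, fb_id and user_data are placeholders):

    #include <xf86drmMode.h>

    /* Request the flip and ask for a completion event; the driver above
     * delivers it via drm_send_vblank_event(). */
    drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, user_data);
    /* ...then poll(fd, ...) and drmHandleEvent(fd, &evctx) to reap it. */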
static int
qxl_hide_cursor(struct qxl_device *qdev)
{
@@ -374,6 +422,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
.cursor_move = qxl_crtc_cursor_move,
.set_config = drm_crtc_helper_set_config,
.destroy = qxl_crtc_destroy,
+ .page_flip = qxl_crtc_page_flip,
};
static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 6e936634d65c..1d9b80c91a15 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -38,7 +38,7 @@
#include "qxl_object.h"
extern int qxl_max_ioctls;
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+static const struct pci_device_id pciidlist[] = {
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
0xffff00, 0 },
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
@@ -84,6 +84,7 @@ static const struct file_operations qxl_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.poll = drm_poll,
+ .read = drm_read,
.mmap = qxl_mmap,
};
@@ -195,6 +196,20 @@ static int qxl_pm_restore(struct device *dev)
return qxl_drm_resume(drm_dev, false);
}
+static u32 qxl_noop_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ return dev->vblank[crtc].count.counter;
+}
+
+static int qxl_noop_enable_vblank(struct drm_device *dev, int crtc)
+{
+ return 0;
+}
+
+static void qxl_noop_disable_vblank(struct drm_device *dev, int crtc)
+{
+}
+
static const struct dev_pm_ops qxl_pm_ops = {
.suspend = qxl_pm_suspend,
.resume = qxl_pm_resume,
@@ -212,10 +227,15 @@ static struct pci_driver qxl_pci_driver = {
};
static struct drm_driver qxl_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.load = qxl_driver_load,
.unload = qxl_driver_unload,
+ .get_vblank_counter = qxl_noop_get_vblank_counter,
+ .enable_vblank = qxl_noop_enable_vblank,
+ .disable_vblank = qxl_noop_disable_vblank,
+
+ .set_busid = drm_pci_set_busid,
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
@@ -224,6 +244,17 @@ static struct drm_driver qxl_driver = {
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = qxl_gem_prime_pin,
+ .gem_prime_unpin = qxl_gem_prime_unpin,
+ .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
+ .gem_prime_vmap = qxl_gem_prime_vmap,
+ .gem_prime_vunmap = qxl_gem_prime_vunmap,
+ .gem_prime_mmap = qxl_gem_prime_mmap,
.gem_free_object = qxl_gem_object_free,
.gem_open_object = qxl_gem_object_open,
.gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 36ed40ba773f..d75c0a9f674f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,6 +31,7 @@
* Definitions taken from spice-protocol, plus kernel driver specific bits.
*/
+#include <linux/fence.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -95,31 +96,24 @@ enum {
QXL_INTERRUPT_IO_CMD |\
QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
-struct qxl_fence {
- struct qxl_device *qdev;
- uint32_t num_active_releases;
- uint32_t *release_ids;
- struct radix_tree_root tree;
-};
-
struct qxl_bo {
/* Protected by gem.mutex */
struct list_head list;
/* Protected by tbo.reserved */
- u32 placements[3];
+ struct ttm_place placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
unsigned pin_count;
void *kptr;
int type;
+
/* Constant after initialization */
struct drm_gem_object gem_base;
bool is_primary; /* is this now a primary surface */
bool hw_surf_alloc;
struct qxl_surface surf;
uint32_t surface_id;
- struct qxl_fence fence; /* per bo fence - list of releases */
struct qxl_release *surf_create;
};
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
@@ -191,6 +185,8 @@ enum {
* spice-protocol/qxl_dev.h */
#define QXL_MAX_RES 96
struct qxl_release {
+ struct fence base;
+
int id;
int type;
uint32_t release_offset;
@@ -284,7 +280,9 @@ struct qxl_device {
uint8_t slot_gen_bits;
uint64_t va_slot_mask;
+ spinlock_t release_lock;
struct idr release_idr;
+ uint32_t release_seqno;
spinlock_t release_idr_lock;
struct mutex async_io_mutex;
unsigned int last_sent_io_cmd;
@@ -532,6 +530,18 @@ int qxl_garbage_collect(struct qxl_device *qdev);
int qxl_debugfs_init(struct drm_minor *minor);
void qxl_debugfs_takedown(struct drm_minor *minor);
+/* qxl_prime.c */
+int qxl_gem_prime_pin(struct drm_gem_object *obj);
+void qxl_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+ struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
+void *qxl_gem_prime_vmap(struct drm_gem_object *obj);
+void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int qxl_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
/* qxl_irq.c */
int qxl_irq_init(struct qxl_device *qdev);
irqreturn_t qxl_irq_handler(int irq, void *arg);
@@ -561,10 +571,4 @@ qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
-/* qxl_fence.c */
-void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
-int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
-int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
-void qxl_fence_fini(struct qxl_fence *qfence);
-
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
deleted file mode 100644
index ae59e91cfb9a..000000000000
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- * Alon Levy
- */
-
-
-#include "qxl_drv.h"
-
-/* QXL fencing-
-
- When we submit operations to the GPU we pass a release reference to the GPU
- with them, the release reference is then added to the release ring when
- the GPU is finished with that particular operation and has removed it from
- its tree.
-
- So we have can have multiple outstanding non linear fences per object.
-
- From a TTM POV we only care if the object has any outstanding releases on
- it.
-
- we wait until all outstanding releases are processeed.
-
- sync object is just a list of release ids that represent that fence on
- that buffer.
-
- we just add new releases onto the sync object attached to the object.
-
- This currently uses a radix tree to store the list of release ids.
-
- For some reason every so often qxl hw fails to release, things go wrong.
-*/
-/* must be called with the fence lock held */
-void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
-{
- radix_tree_insert(&qfence->tree, rel_id, qfence);
- qfence->num_active_releases++;
-}
-
-int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
-{
- void *ret;
- int retval = 0;
- struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
- spin_lock(&bo->tbo.bdev->fence_lock);
-
- ret = radix_tree_delete(&qfence->tree, rel_id);
- if (ret == qfence)
- qfence->num_active_releases--;
- else {
- DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
- retval = -ENOENT;
- }
- spin_unlock(&bo->tbo.bdev->fence_lock);
- return retval;
-}
-
-
-int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
-{
- qfence->qdev = qdev;
- qfence->num_active_releases = 0;
- INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
- return 0;
-}
-
-void qxl_fence_fini(struct qxl_fence *qfence)
-{
- kfree(qfence->release_ids);
- qfence->num_active_releases = 0;
-}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index fd88eb4a3f79..b2977a181935 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -223,6 +223,7 @@ static int qxl_device_init(struct qxl_device *qdev,
idr_init(&qdev->release_idr);
spin_lock_init(&qdev->release_idr_lock);
+ spin_lock_init(&qdev->release_lock);
idr_init(&qdev->surf_id_idr);
spin_lock_init(&qdev->surf_id_idr_lock);
@@ -297,6 +298,9 @@ int qxl_driver_unload(struct drm_device *dev)
if (qdev == NULL)
return 0;
+
+ drm_vblank_cleanup(dev);
+
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
@@ -324,15 +328,20 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
if (r)
goto out;
+ r = drm_vblank_init(dev, 1);
+ if (r)
+ goto unload;
+
r = qxl_modeset_init(qdev);
- if (r) {
- qxl_driver_unload(dev);
- goto out;
- }
+ if (r)
+ goto unload;
drm_kms_helper_poll_init(qdev->ddev);
return 0;
+unload:
+ qxl_driver_unload(dev);
+
out:
kfree(qdev);
return r;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b95f144f0b49..69c104c3240f 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -36,7 +36,6 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
qxl_surface_evict(qdev, bo, false);
- qxl_fence_fini(&bo->fence);
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
@@ -55,21 +54,24 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+ unsigned i;
- qbo->placement.fpfn = 0;
- qbo->placement.lpfn = 0;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
if (domain == QXL_GEM_DOMAIN_SURFACE)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
if (domain == QXL_GEM_DOMAIN_CPU)
- qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
+ qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
if (!c)
- qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
qbo->placement.num_placement = c;
qbo->placement.num_busy_placement = c;
+ for (i = 0; i < c; ++i) {
+ qbo->placements[i].fpfn = 0;
+ qbo->placements[i].lpfn = 0;
+ }
}
@@ -99,7 +101,6 @@ int qxl_bo_create(struct qxl_device *qdev,
bo->type = domain;
bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
- qxl_fence_init(qdev, &bo->fence);
INIT_LIST_HEAD(&bo->list);
if (surf)
@@ -259,7 +260,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
if (bo->pin_count)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 83a423293afd..37af1bc0dd00 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -76,12 +76,10 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
}
return r;
}
- spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
- if (bo->tbo.sync_obj)
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- spin_unlock(&bo->tbo.bdev->fence_lock);
+
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
ttm_bo_unreserve(&bo->tbo);
return r;
}
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
new file mode 100644
index 000000000000..ba0689c728e8
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Canonical
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andreas Pokorny
+ */
+
+#include "qxl_drv.h"
+
+/* Empty implementations, as there should not be any other driver for a
+ * virtual device that might share buffers with qxl. */
+
+int qxl_gem_prime_pin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
+
+void qxl_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+ struct drm_device *dev, size_t size,
+ struct sg_table *table)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+int qxl_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *area)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 14e776f1d14e..a6e19c83143e 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,6 +21,7 @@
*/
#include "qxl_drv.h"
#include "qxl_object.h"
+#include <trace/events/fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -39,6 +40,88 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
+static const char *qxl_get_driver_name(struct fence *fence)
+{
+ return "qxl";
+}
+
+static const char *qxl_get_timeline_name(struct fence *fence)
+{
+ return "release";
+}
+
+static bool qxl_nop_signaling(struct fence *fence)
+{
+ /* fences are always automatically signaled, so just pretend we did this. */
+ return true;
+}
+
+static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+{
+ struct qxl_device *qdev;
+ struct qxl_release *release;
+ int count = 0, sc = 0;
+ bool have_drawable_releases;
+ unsigned long cur, end = jiffies + timeout;
+
+ qdev = container_of(fence->lock, struct qxl_device, release_lock);
+ release = container_of(fence, struct qxl_release, base);
+ have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
+
+retry:
+ sc++;
+
+ if (fence_is_signaled(fence))
+ goto signaled;
+
+ qxl_io_notify_oom(qdev);
+
+ for (count = 0; count < 11; count++) {
+ if (!qxl_queue_garbage_collect(qdev, true))
+ break;
+
+ if (fence_is_signaled(fence))
+ goto signaled;
+ }
+
+ if (fence_is_signaled(fence))
+ goto signaled;
+
+ if (have_drawable_releases || sc < 4) {
+ if (sc > 2)
+ /* back off */
+ usleep_range(500, 1000);
+
+ if (time_after(jiffies, end))
+ return 0;
+
+ if (have_drawable_releases && sc > 300) {
+ FENCE_WARN(fence, "failed to wait on release %d "
+ "after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
+ goto signaled;
+ }
+ goto retry;
+ }
+ /*
+ * The original sync_obj_wait implementation gave up after 3 spins
+ * when have_drawable_releases was not set; falling through to the
+ * signaled label above preserves that behaviour.
+ */
+
+signaled:
+ cur = jiffies;
+ if (time_after(cur, end))
+ return 0;
+ return end - cur;
+}
+
+static const struct fence_ops qxl_fence_ops = {
+ .get_driver_name = qxl_get_driver_name,
+ .get_timeline_name = qxl_get_timeline_name,
+ .enable_signaling = qxl_nop_signaling,
+ .wait = qxl_fence_wait,
+};
+
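These ops plug qxl releases into the cross-driver struct fence framework. Later in this file the driver initializes each fence with release->id | 0xf0000000 instead of a real context; for contrast, a sketch of the conventional setup (all names here are illustrative, not qxl code):

    #include <linux/fence.h>

    static unsigned example_context;
    static unsigned example_seqno;

    static void example_fence_setup(void)
    {
            /* allocate one timeline context at driver init */
            example_context = fence_context_alloc(1);
    }

    static void example_fence_emit(struct fence *f, spinlock_t *lock,
                                   const struct fence_ops *ops)
    {
            /* seqnos increase monotonically within the context */
            fence_init(f, ops, lock, example_context, ++example_seqno);
    }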
static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
@@ -46,13 +129,13 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release *release;
int handle;
size_t size = sizeof(*release);
- int idr_ret;
release = kmalloc(size, GFP_KERNEL);
if (!release) {
DRM_ERROR("Out of memory\n");
return 0;
}
+ release->base.ops = NULL;
release->type = type;
release->release_offset = 0;
release->surface_release_id = 0;
@@ -60,44 +143,61 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
- idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
+ handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
+ release->base.seqno = ++qdev->release_seqno;
spin_unlock(&qdev->release_idr_lock);
idr_preload_end();
- handle = idr_ret;
- if (idr_ret < 0)
- goto release_fail;
+ if (handle < 0) {
+ kfree(release);
+ *ret = NULL;
+ return handle;
+ }
*ret = release;
QXL_INFO(qdev, "allocated release %lld\n", handle);
release->id = handle;
-release_fail:
-
return handle;
}
+static void
+qxl_release_free_list(struct qxl_release *release)
+{
+ while (!list_empty(&release->bos)) {
+ struct qxl_bo_list *entry;
+ struct qxl_bo *bo;
+
+ entry = container_of(release->bos.next,
+ struct qxl_bo_list, tv.head);
+ bo = to_qxl_bo(entry->tv.bo);
+ qxl_bo_unref(&bo);
+ list_del(&entry->tv.head);
+ kfree(entry);
+ }
+}
+
void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
- struct qxl_bo_list *entry, *tmp;
QXL_INFO(qdev, "release %d, type %d\n", release->id,
release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
- list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
- QXL_INFO(qdev, "release %llx\n",
- drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
- - DRM_FILE_OFFSET);
- qxl_fence_remove_release(&bo->fence, release->id);
- qxl_bo_unref(&bo);
- kfree(entry);
- }
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
spin_unlock(&qdev->release_idr_lock);
- kfree(release);
+
+ if (release->base.ops) {
+ WARN_ON(list_empty(&release->bos));
+ qxl_release_free_list(release);
+
+ fence_signal(&release->base);
+ fence_put(&release->base);
+ } else {
+ qxl_release_free_list(release);
+ kfree(release);
+ }
}
static int qxl_release_bo_alloc(struct qxl_device *qdev,
@@ -142,6 +242,10 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
+ ret = reservation_object_reserve_shared(bo->tbo.resv);
+ if (ret)
+ return ret;
+
/* allocate a surface for reserved + validated buffers */
ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
if (ret)
@@ -159,7 +263,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
if (ret)
return ret;
@@ -199,6 +303,8 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
/* stash the release after the create command */
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+ if (idr_ret < 0)
+ return idr_ret;
bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
(*release)->release_offset = create_rel->release_offset + 64;
@@ -239,6 +345,11 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
}
idr_ret = qxl_release_alloc(qdev, type, release);
+ if (idr_ret < 0) {
+ if (rbo)
+ *rbo = NULL;
+ return idr_ret;
+ }
mutex_lock(&qdev->release_mutex);
if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
@@ -319,40 +430,44 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
- struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_bo_driver *driver;
struct qxl_bo *qbo;
+ struct ttm_validate_buffer *entry;
+ struct qxl_device *qdev;
/* if only one object on the release its the release itself
since these objects are pinned no need to reserve */
- if (list_is_singular(&release->bos))
+ if (list_is_singular(&release->bos) || list_empty(&release->bos))
return;
bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
+ qdev = container_of(bdev, struct qxl_device, mman.bdev);
+
+ /*
+ * Since we never really allocated a context and we don't want to conflict,
+ * set the highest bits. This will break if we really allow exporting of dma-bufs.
+ */
+ fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+ release->id | 0xf0000000, release->base.seqno);
+ trace_fence_emit(&release->base);
+
driver = bdev->driver;
glob = bo->glob;
spin_lock(&glob->lru_lock);
- spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
qbo = to_qxl_bo(bo);
- if (!entry->bo->sync_obj)
- entry->bo->sync_obj = &qbo->fence;
-
- qxl_fence_add_release_locked(&qbo->fence, release->id);
-
+ reservation_object_add_shared_fence(bo->resv, &release->base);
ttm_bo_add_to_lru(bo);
__ttm_bo_unreserve(bo);
- entry->reserved = false;
}
- spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
ww_acquire_fini(&release->ticket);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 71a1baeac14e..abe945a04fd4 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -188,11 +188,13 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct qxl_bo *qbo;
- static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ static struct ttm_place placements = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ };
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
- placement->fpfn = 0;
- placement->lpfn = 0;
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@@ -355,92 +357,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
-
-static int qxl_sync_obj_wait(void *sync_obj,
- bool lazy, bool interruptible)
-{
- struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
- int count = 0, sc = 0;
- struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
- if (qfence->num_active_releases == 0)
- return 0;
-
-retry:
- if (sc == 0) {
- if (bo->type == QXL_GEM_DOMAIN_SURFACE)
- qxl_update_surface(qfence->qdev, bo);
- } else if (sc >= 1) {
- qxl_io_notify_oom(qfence->qdev);
- }
-
- sc++;
-
- for (count = 0; count < 10; count++) {
- bool ret;
- ret = qxl_queue_garbage_collect(qfence->qdev, true);
- if (ret == false)
- break;
-
- if (qfence->num_active_releases == 0)
- return 0;
- }
-
- if (qfence->num_active_releases) {
- bool have_drawable_releases = false;
- void **slot;
- struct radix_tree_iter iter;
- int release_id;
-
- radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
- struct qxl_release *release;
-
- release_id = iter.index;
- release = qxl_release_from_id_locked(qfence->qdev, release_id);
- if (release == NULL)
- continue;
-
- if (release->type == QXL_RELEASE_DRAWABLE)
- have_drawable_releases = true;
- }
-
- qxl_queue_garbage_collect(qfence->qdev, true);
-
- if (have_drawable_releases || sc < 4) {
- if (sc > 2)
- /* back off */
- usleep_range(500, 1000);
- if (have_drawable_releases && sc > 300) {
- WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
- return -EBUSY;
- }
- goto retry;
- }
- }
- return 0;
-}
-
-static int qxl_sync_obj_flush(void *sync_obj)
-{
- return 0;
-}
-
-static void qxl_sync_obj_unref(void **sync_obj)
-{
- *sync_obj = NULL;
-}
-
-static void *qxl_sync_obj_ref(void *sync_obj)
-{
- return sync_obj;
-}
-
-static bool qxl_sync_obj_signaled(void *sync_obj)
-{
- struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
- return (qfence->num_active_releases == 0);
-}
-
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
@@ -467,16 +383,9 @@ static struct ttm_bo_driver qxl_bo_driver = {
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
- .sync_obj_signaled = &qxl_sync_obj_signaled,
- .sync_obj_wait = &qxl_sync_obj_wait,
- .sync_obj_flush = &qxl_sync_obj_flush,
- .sync_obj_unref = &qxl_sync_obj_unref,
- .sync_obj_ref = &qxl_sync_obj_ref,
.move_notify = &qxl_bo_move_notify,
};
-
-
int qxl_ttm_init(struct qxl_device *qdev)
{
int r;
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 59459fe4e8c5..1fae2f706b01 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -452,7 +452,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->span_offset >> 5));
- dev_priv->sarea = drm_getsarea(dev);
+ dev_priv->sarea = drm_legacy_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 5bd307cd8da1..4a59370eb580 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -62,6 +62,7 @@ static struct drm_driver driver = {
.load = r128_driver_load,
.preclose = r128_driver_preclose,
.lastclose = r128_driver_lastclose,
+ .set_busid = drm_pci_set_busid,
.get_vblank_counter = r128_get_vblank_counter,
.enable_vblank = r128_enable_vblank,
.disable_vblank = r128_disable_vblank,
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 0013ad0db9ef..7d7aed5357f0 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -60,7 +60,7 @@ radeon-y := radeon_drv.o
# add UMS driver
radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
- radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
+ radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o drm_buffer.o
# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
@@ -76,11 +76,11 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
- si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
+ si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o
# add async DMA block
radeon-y += \
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 022561e28707..d416bb2ff48d 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif
+ rdev->pm.dpm.thermal.min_temp = low_temp;
+ rdev->pm.dpm.thermal.max_temp = high_temp;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b625646bf3e2..1f598ab3b9a7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 hdp_host_path_cntl;
u32 tmp;
- int i, j, k;
+ int i, j;
switch (rdev->family) {
case CHIP_BONAIRE:
@@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x130B) ||
(rdev->pdev->device == 0x130E) ||
(rdev->pdev->device == 0x1315) ||
+ (rdev->pdev->device == 0x1318) ||
(rdev->pdev->device == 0x131B)) {
rdev->config.cik.max_cu_per_sh = 4;
rdev->config.cik.max_backends_per_se = 1;
@@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
rdev->config.cik.max_sh_per_se,
rdev->config.cik.max_backends_per_se);
+ rdev->config.cik.active_cus = 0;
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
- for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
- rdev->config.cik.active_cus +=
- hweight32(cik_get_cu_active_bitmap(rdev, i, j));
- }
+ rdev->config.cik.active_cus +=
+ hweight32(cik_get_cu_active_bitmap(rdev, i, j));
}
}
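hweight32() already counts every set bit in the per-SH bitmap, so the removed inner loop over max_cu_per_sh was adding the same population count max_cu_per_sh times and overcounting active CUs; for a bitmap of 0x3f the corrected code adds 6 once instead of 6 * max_cu_per_sh.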
@@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
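The new third argument to radeon_ring_unlock_commit() (and the matching one radeon_ib_schedule() gains below) is, by assumption here, an hdp_flush flag: only indirect buffers that originate from userspace need the HDP cache flushed, so kernel-internal submissions like this ring test pass false.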
@@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
}
+/**
+ * cik_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+ if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+ /* Prevent the PFP from running ahead of the semaphore wait */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
+ }
+
return true;
}
@@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
@@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
@@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return 0;
}
@@ -5958,14 +5975,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* update SH_MEM_* regs */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, VMID(vm->id));
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SH_MEM_BASES >> 2);
radeon_ring_write(ring, 0);
@@ -5976,7 +5993,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, 0);
@@ -5987,7 +6004,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
@@ -8229,8 +8246,10 @@ restart_ih:
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
- if (queue_reset)
- schedule_work(&rdev->reset_work);
+ if (queue_reset) {
+ rdev->needs_reset = true;
+ wake_up_all(&rdev->fence_queue);
+ }
if (queue_thermal)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index bcf480510ac2..192278bc993c 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
@@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
radeon_ring_write(ring, 1); /* number of DWs to follow */
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = readl(ptr);
@@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/radeon/drm_buffer.c
index 86a4a4a60afc..f4e0f3a3d7b1 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/radeon/drm_buffer.c
@@ -33,7 +33,7 @@
*/
#include <linux/export.h>
-#include <drm/drm_buffer.h>
+#include "drm_buffer.h"
/**
* Allocate the drm buffer object.
@@ -86,7 +86,6 @@ error_out:
kfree(*buf);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_buffer_alloc);
/**
* Copy the user data to the begin of the buffer and reset the processing
@@ -123,7 +122,6 @@ int drm_buffer_copy_from_user(struct drm_buffer *buf,
buf->iterator = 0;
return 0;
}
-EXPORT_SYMBOL(drm_buffer_copy_from_user);
/**
* Free the drm buffer object
@@ -141,7 +139,6 @@ void drm_buffer_free(struct drm_buffer *buf)
kfree(buf);
}
}
-EXPORT_SYMBOL(drm_buffer_free);
/**
* Read an object from buffer that may be split to multiple parts. If object
@@ -178,4 +175,3 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
drm_buffer_advance(buf, objsize);
return obj;
}
-EXPORT_SYMBOL(drm_buffer_read_object);
diff --git a/drivers/gpu/drm/radeon/drm_buffer.h b/drivers/gpu/drm/radeon/drm_buffer.h
new file mode 100644
index 000000000000..c80d3a340b94
--- /dev/null
+++ b/drivers/gpu/drm/radeon/drm_buffer.h
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#ifndef _DRM_BUFFER_H_
+#define _DRM_BUFFER_H_
+
+#include <drm/drmP.h>
+
+struct drm_buffer {
+ int iterator;
+ int size;
+ char *data[];
+};
+
+
+/**
+ * Return the index of the page that the buffer is currently pointing at.
+ */
+static inline int drm_buffer_page(struct drm_buffer *buf)
+{
+ return buf->iterator / PAGE_SIZE;
+}
+/**
+ * Return the index of the current byte in the page
+ */
+static inline int drm_buffer_index(struct drm_buffer *buf)
+{
+ return buf->iterator & (PAGE_SIZE - 1);
+}
+/**
+ * Return the number of bytes left to process
+ */
+static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
+{
+ return buf->size - buf->iterator;
+}
+
+/**
+ * Advance the buffer iterator by the given number of bytes.
+ */
+static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
+{
+ buf->iterator += bytes;
+}
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: A pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the processing
+ * iterator.
+ *
+ * user_data: A pointer to the data that is copied to the buffer.
+ * size: The number of bytes to copy.
+ */
+extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size);
+
+/**
+ * Free the drm buffer object
+ */
+extern void drm_buffer_free(struct drm_buffer *buf);
+
+/**
+ * Read an object from a buffer that may be split into multiple parts. If the
+ * object is not split, the function simply returns a pointer to the object
+ * inside the buffer. If the object is split, its data is copied into the
+ * stack object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the object in bytes.
+ * stack_obj: A pointer to a memory location where object can be copied.
+ */
+extern void *drm_buffer_read_object(struct drm_buffer *buf,
+ int objsize, void *stack_obj);
+
+/**
+ * Returns the pointer to the dword which is offset number of elements from the
+ * current processing location.
+ *
+ * The caller must make sure that the dword is not split across pages in the
+ * buffer. This requirement is easily met if all object sizes in the buffer
+ * are multiples of the dword size and PAGE_SIZE is a multiple of it.
+ *
+ * Call to this function doesn't change the processing location.
+ *
+ * offset: The index of the dword relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
+ int offset)
+{
+ int iter = buffer->iterator + offset * 4;
+ return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+/**
+ * Returns the pointer to the byte which is offset number of elements from
+ * the current processing location.
+ *
+ * Call to this function doesn't change the processing location.
+ *
+ * offset: The index of the byte relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
+ int offset)
+{
+ int iter = buffer->iterator + offset;
+ return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+
+#endif
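With the EXPORT_SYMBOLs dropped above, these helpers become radeon-internal. A minimal usage sketch of the API declared in this header — parse_cmds() and struct my_cmd are hypothetical, the drm_buffer_* calls follow the prototypes above:

/*
 * Sketch of the intended drm_buffer flow: allocate, fill from userspace,
 * then walk objects that may straddle page boundaries.
 */
#include "drm_buffer.h"

struct my_cmd {
	u32 opcode;
	u32 arg;
};

static int parse_cmds(void __user *user_cmds, int size)
{
	struct drm_buffer *buf;
	struct my_cmd stack_cmd, *cmd;
	int ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, user_cmds, size);
	if (ret)
		goto out;

	while (drm_buffer_unprocessed(buf) >= sizeof(*cmd)) {
		/* returns a pointer into the buffer, or copies into
		 * stack_cmd when the object straddles a page */
		cmd = drm_buffer_read_object(buf, sizeof(*cmd), &stack_cmd);
		/* ... dispatch on cmd->opcode ... */
	}
out:
	drm_buffer_free(buf);
	return ret;
}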
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4fedd14e670a..dbca60c7d097 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 478caefe0fef..afaba388c36d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9ef8c38f2d66..8b58e11b64fa 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
return kv_enable_uvd_dpm(rdev, !gate);
}
-static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
u8 i;
struct radeon_vce_clock_voltage_dependency_table *table =
&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
for (i = 0; i < table->count; i++) {
- if (table->entries[i].evclk >= 0) /* XXX */
+ if (table->entries[i].evclk >= evclk)
break;
}
@@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
- pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+ pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
ret = kv_copy_bytes_to_smc(rdev,
pi->dpm_table_start +
@@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->caps_sclk_ds = true;
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
- pi->bapm_enable = true;
+ if (radeon_bapm == 0)
+ pi->bapm_enable = false;
+ else
+ pi->bapm_enable = true;
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 327b85f7fd0d..ba89375f197f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1505,7 +1505,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
cayman_cp_enable(rdev, true);
@@ -1547,7 +1547,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
/* XXX init other rings */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 04b5940b8923..4c5ec44ff328 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return r;
}
@@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
radeon_ring_write(ring, PACKET0(scratch, 0));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 58f0473aa73f..67780374a652 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 75b30338c226..1bc4704034ce 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
static void r300_errata(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 84b1d5367a11..9418e388b045 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -34,10 +34,10 @@
*/
#include <drm/drmP.h>
-#include <drm/drm_buffer.h>
#include <drm/radeon_drm.h>
#include "radeon_drv.h"
#include "r300_reg.h"
+#include "drm_buffer.h"
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 802b19220a21..2828605aef3f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c70a504d96af..a95ced569d84 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -122,6 +122,94 @@ u32 r600_get_xclk(struct radeon_device *rdev)
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
+ unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
+ UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
+
+ if (rdev->family >= CHIP_RS780)
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
+ ~UPLL_BYPASS_CNTL);
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ if (rdev->clock.spll.reference_freq == 10000)
+ ref_div = 34;
+ else
+ ref_div = 4;
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+ ref_div + 1, 0xFFF, 2, 30, ~0,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
+ fb_div >>= 1;
+ else
+ fb_div |= 1;
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* For RS780 we have to choose ref clk */
+ if (rdev->family >= CHIP_RS780)
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
+ ~UPLL_REFCLK_SRC_SEL_MASK);
+
+ /* set the required fb, ref and post divider values */
+ WREG32_P(CG_UPLL_FUNC_CNTL,
+ UPLL_FB_DIV(fb_div) |
+ UPLL_REF_DIV(ref_div),
+ ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_SW_HILEN(vclk_div >> 1) |
+ UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+ UPLL_SW_HILEN2(dclk_div >> 1) |
+ UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
+ UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
+ ~UPLL_SW_MASK);
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* deassert BYPASS EN */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+ if (rdev->family >= CHIP_RS780)
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
return 0;
}
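A hedged sketch, not driver code: assuming radeon_uvd_calc_upll_dividers() follows its usual contract (VCO = ref_freq * fb_div / fb_factor, with fb_factor == ref_div + 1 in the call above, and vclk = VCO / vclk_div), the programmed frequency can be estimated as below. The RV670+/RS780 fb_div tweak above is applied after this calculation.

/*
 * Estimate the resulting VCLK, all values in the 10 kHz units used above.
 * Hypothetical checker under the stated assumption about the helper.
 */
static unsigned approx_vclk_10khz(unsigned ref_freq, unsigned fb_div,
				  unsigned ref_div, unsigned vclk_div)
{
	unsigned vco = ref_freq * fb_div / (ref_div + 1);

	return vco / vclk_div;
}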
@@ -992,6 +1080,8 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1042,6 +1132,8 @@ static void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
radeon_gart_table_vram_unpin(rdev);
}
@@ -2547,7 +2639,7 @@ int r600_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2775,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -2753,6 +2845,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -2768,6 +2871,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+ /* PFP_SYNC_ME packet only exists on 7xx+ */
+ if (emit_wait && (rdev->family >= CHIP_RV770)) {
+ /* Prevent the PFP from running ahead of the semaphore wait */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
+ }
+
return true;
}
@@ -2845,7 +2955,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
@@ -2899,6 +3009,18 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
+ if (rdev->has_uvd) {
+ r = uvd_v1_0_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+ }
+ }
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+ }
+
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
@@ -2927,6 +3049,18 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ if (rdev->has_uvd) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+ RADEON_CP_PACKET2);
+ if (!r)
+ r = uvd_v1_0_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2986,6 +3120,10 @@ int r600_suspend(struct radeon_device *rdev)
radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r600_cp_stop(rdev);
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_suspend(rdev);
+ }
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -3065,6 +3203,14 @@ int r600_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ if (rdev->has_uvd) {
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+ }
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3094,6 +3240,10 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_cp_fini(rdev);
r600_irq_fini(rdev);
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_fini(rdev);
+ }
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -3165,7 +3315,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 8c9b7e26533c..639d6681ef5b 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2052,7 +2052,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->gart_textures_offset = init->gart_textures_offset;
- master_priv->sarea = drm_getsarea(dev);
+ master_priv->sarea = drm_legacy_getsarea(dev);
if (!master_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
r600_do_cleanup_cp(dev);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 4969cef44a19..51fd98553eaf 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = readl(ptr);
@@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[3] = 0xDEADBEEF;
ib.length_dw = 4;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f94e7a9afe75..671b48032a3d 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -330,11 +330,12 @@
#define HDP_TILING_CONFIG 0x2F3C
#define HDP_DEBUG1 0x2F34
+#define MC_CONFIG 0x2000
#define MC_VM_AGP_TOP 0x2184
#define MC_VM_AGP_BOT 0x2188
#define MC_VM_AGP_BASE 0x218C
#define MC_VM_FB_LOCATION 0x2180
-#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
+#define MC_VM_L1_TLB_MCB_RD_UVD_CNTL 0x2124
#define ENABLE_L1_TLB (1 << 0)
#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
#define ENABLE_L1_STRICT_ORDERING (1 << 2)
@@ -354,12 +355,14 @@
#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15)
#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000
#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15
+#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0
#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC
#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204
#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208
#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C
#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200
+#define MC_VM_L1_TLB_MCB_WR_UVD_CNTL 0x212c
#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4
#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8
#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210
@@ -373,6 +376,8 @@
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
+#define RS_DQ_RD_RET_CONF 0x2348
+
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
@@ -1483,6 +1488,7 @@
#define UVD_CGC_GATE 0xf4a8
#define UVD_LMI_CTRL2 0xf4f4
#define UVD_MASTINT_EN 0xf500
+#define UVD_FW_START 0xf51C
#define UVD_LMI_ADDR_EXT 0xf594
#define UVD_LMI_CTRL 0xf598
#define UVD_LMI_SWAP_CNTL 0xf5b4
@@ -1495,6 +1501,13 @@
#define UVD_MPC_SET_MUX 0xf5f4
#define UVD_MPC_SET_ALU 0xf5f8
+#define UVD_VCPU_CACHE_OFFSET0 0xf608
+#define UVD_VCPU_CACHE_SIZE0 0xf60c
+#define UVD_VCPU_CACHE_OFFSET1 0xf610
+#define UVD_VCPU_CACHE_SIZE1 0xf614
+#define UVD_VCPU_CACHE_OFFSET2 0xf618
+#define UVD_VCPU_CACHE_SIZE2 0xf61c
+
#define UVD_VCPU_CNTL 0xf660
#define UVD_SOFT_RESET 0xf680
#define RBC_SOFT_RESET (1<<0)
@@ -1524,9 +1537,35 @@
#define UVD_CONTEXT_ID 0xf6f4
+/* rs780 only */
+#define GFX_MACRO_BYPASS_CNTL 0x30c0
+#define SPLL_BYPASS_CNTL (1 << 0)
+#define UPLL_BYPASS_CNTL (1 << 1)
+
+#define CG_UPLL_FUNC_CNTL 0x7e0
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_FB_DIV(x) ((x) << 4)
+# define UPLL_FB_DIV_MASK 0x0000FFF0
+# define UPLL_REF_DIV(x) ((x) << 16)
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_REFCLK_SRC_SEL_MASK 0x20000000
# define UPLL_CTLACK_MASK 0x40000000
# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x7e4
+# define UPLL_SW_HILEN(x) ((x) << 0)
+# define UPLL_SW_LOLEN(x) ((x) << 4)
+# define UPLL_SW_HILEN2(x) ((x) << 8)
+# define UPLL_SW_LOLEN2(x) ((x) << 12)
+# define UPLL_DIVEN_MASK 0x00010000
+# define UPLL_DIVEN2_MASK 0x00020000
+# define UPLL_SW_MASK 0x0003FFFF
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
/*
* PM4
@@ -1597,6 +1636,7 @@
*/
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+#define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9e1732eb402c..79c988db79ad 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -65,6 +65,8 @@
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
+#include <linux/hashtable.h>
+#include <linux/fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -105,6 +107,7 @@ extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
extern int radeon_use_pflipirq;
+extern int radeon_bapm;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -118,9 +121,6 @@ extern int radeon_use_pflipirq;
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
-/* fence seq are set to this number when signaled */
-#define RADEON_FENCE_SIGNALED_SEQ 0LL
-
/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0
@@ -348,28 +348,32 @@ extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
* Fences.
*/
struct radeon_fence_driver {
+ struct radeon_device *rdev;
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
- bool initialized;
+ bool initialized, delayed_irq;
+ struct delayed_work lockup_work;
};
struct radeon_fence {
+ struct fence base;
+
struct radeon_device *rdev;
- struct kref kref;
- /* protected by radeon_fence.lock */
uint64_t seq;
/* RB, DMA, etc. */
unsigned ring;
+
+ wait_queue_t fence_wake;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
@@ -467,7 +471,7 @@ struct radeon_bo {
struct list_head list;
/* Protected by tbo.reserved */
u32 initial_domain;
- u32 placements[3];
+ struct ttm_place placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -487,6 +491,9 @@ struct radeon_bo {
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
+
+ struct radeon_mn *mn;
+ struct interval_tree_node mn_it;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -778,6 +785,7 @@ struct radeon_irq {
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
@@ -967,7 +975,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib);
+ struct radeon_ib *const_ib, bool hdp_flush);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -977,8 +985,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool hdp_flush);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool hdp_flush);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -1636,7 +1646,8 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+ uint32_t allowed_domains);
void radeon_uvd_free_handles(struct radeon_device *rdev,
struct drm_file *filp);
int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
@@ -1725,6 +1736,11 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);
+/*
+ * MMU Notifier
+ */
+int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
+void radeon_mn_unregister(struct radeon_bo *bo);
/*
* Debugfs
@@ -2138,6 +2154,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -2294,6 +2312,7 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
+ unsigned fence_context;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
@@ -2312,7 +2331,7 @@ struct radeon_device {
bool need_dma32;
bool accel_working;
bool fastfb_working; /* IGP feature*/
- bool needs_reset;
+ bool needs_reset, in_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
@@ -2333,7 +2352,6 @@ struct radeon_device {
struct radeon_mec mec;
struct work_struct hotplug_work;
struct work_struct audio_work;
- struct work_struct reset_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
bool has_uvd;
@@ -2370,6 +2388,9 @@ struct radeon_device {
/* tracking pinned memory */
u64 vram_pin_size;
u64 gart_pin_size;
+
+ struct mutex mn_lock;
+ DECLARE_HASHTABLE(mn_hash, 7);
};
bool radeon_is_px(struct drm_device *dev);
@@ -2425,7 +2446,17 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
*/
-#define to_radeon_fence(p) ((struct radeon_fence *)(p))
+extern const struct fence_ops radeon_fence_ops;
+
+static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+{
+ struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
+
+ if (__f->base.ops == &radeon_fence_ops)
+ return __f;
+
+ return NULL;
+}
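Unlike the old cast macro, the new helper validates the ops pointer, so callers can safely reject fences that originate from another driver. A usage sketch with a hypothetical caller:

/* Returns true only for fences created by this driver. */
static bool fence_is_from_radeon(struct fence *f)
{
	struct radeon_fence *rfence = to_radeon_fence(f);

	if (!rfence)
		return false;	/* fence belongs to another driver */
	/* safe to touch radeon-specific fields, e.g. rfence->ring */
	return true;
}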
/*
* Registers read & write functions.
@@ -2745,18 +2776,25 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
-#if DRM_DEBUG_CODE == 0
+
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
+ if (ring->count_dw <= 0)
+ DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
+
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
-#else
-/* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
-#endif
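Ring writes are now always range-checked, and the commit helpers take the new hdp_flush flag from the prototype change above. A locked-submission sketch mirroring the r100_ring_test() pattern elsewhere in this diff:

/* Write a scratch register through the ring and commit without HDP flush. */
static int emit_scratch_write(struct radeon_device *rdev,
			      struct radeon_ring *ring, u32 scratch)
{
	int r = radeon_ring_lock(rdev, ring, 2);

	if (r)
		return r;

	radeon_ring_write(ring, PACKET0(scratch, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	/* false: no HDP flush; the CS ioctl path passes true (radeon_cs.c) */
	radeon_ring_unlock_commit(rdev, ring, false);
	return 0;
}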
/*
* ASICs macro.
@@ -2871,6 +2909,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ uint32_t flags);
+extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
+extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index eeeeabe09758..d91f965e8219 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -965,6 +965,19 @@ static struct radeon_asic r600_asic = {
},
};
+static struct radeon_asic_ring rv6xx_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v1_0_fence_emit,
+ .emit_semaphore = &uvd_v1_0_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &uvd_v1_0_get_rptr,
+ .get_wptr = &uvd_v1_0_get_wptr,
+ .set_wptr = &uvd_v1_0_set_wptr,
+};
+
static struct radeon_asic rv6xx_asic = {
.init = &r600_init,
.fini = &r600_fini,
@@ -984,6 +997,7 @@ static struct radeon_asic rv6xx_asic = {
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1074,6 +1088,7 @@ static struct radeon_asic rs780_asic = {
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -2298,7 +2313,15 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &rs780_asic;
- rdev->has_uvd = true;
+ /* 760G/780V/880V don't have UVD */
+ if ((rdev->pdev->device == 0x9616)||
+ (rdev->pdev->device == 0x9611)||
+ (rdev->pdev->device == 0x9613)||
+ (rdev->pdev->device == 0x9711)||
+ (rdev->pdev->device == 0x9713))
+ rdev->has_uvd = false;
+ else
+ rdev->has_uvd = true;
break;
case CHIP_RV770:
case CHIP_RV730:
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 275a5dc01780..987a3b713e06 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -883,6 +883,7 @@ uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void uvd_v1_0_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int uvd_v1_0_resume(struct radeon_device *rdev);
int uvd_v1_0_init(struct radeon_device *rdev);
void uvd_v1_0_fini(struct radeon_device *rdev);
@@ -890,6 +891,8 @@ int uvd_v1_0_start(struct radeon_device *rdev);
void uvd_v1_0_stop(struct radeon_device *rdev);
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void uvd_v1_0_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index bb0d5c3a8311..0c388016eecb 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1298,7 +1298,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->gart_textures_offset = init->gart_textures_offset;
- master_priv->sarea = drm_getsarea(dev);
+ master_priv->sarea = drm_legacy_getsarea(dev);
if (!master_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
radeon_do_cleanup_cp(dev);
@@ -2106,9 +2106,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
else
dev_priv->flags |= RADEON_IS_PCI;
- ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
- pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
- _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
+ ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 2),
+ pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
+ _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
if (ret != 0)
return ret;
@@ -2135,8 +2135,8 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
/* prebuild the SAREA */
sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
- ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
- &master_priv->sarea);
+ ret = drm_legacy_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
+ &master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
kfree(master_priv);
@@ -2162,7 +2162,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
master_priv->sarea_priv = NULL;
if (master_priv->sarea)
- drm_rmmap_locked(dev, master_priv->sarea);
+ drm_legacy_rmmap_locked(dev, master_priv->sarea);
kfree(master_priv);
@@ -2181,9 +2181,9 @@ int radeon_driver_firstopen(struct drm_device *dev)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
- ret = drm_addmap(dev, dev_priv->fb_aper_offset,
- pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
- _DRM_WRITE_COMBINING, &map);
+ ret = drm_legacy_addmap(dev, dev_priv->fb_aper_offset,
+ pci_resource_len(dev->pdev, 0),
+ _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map);
if (ret != 0)
return ret;
@@ -2196,7 +2196,7 @@ int radeon_driver_unload(struct drm_device *dev)
DRM_DEBUG("\n");
- drm_rmmap(dev, dev_priv->mmio);
+ drm_legacy_rmmap(dev, dev_priv->mmio);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ee712c199b25..6e3d1c8f3483 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -78,7 +78,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
struct radeon_cs_chunk *chunk;
struct radeon_cs_buckets buckets;
unsigned i, j;
- bool duplicate;
+ bool duplicate, need_mmap_lock = false;
+ int r;
if (p->chunk_relocs_idx == -1) {
return 0;
@@ -132,13 +133,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
* the buffers used for read only, which doubles the range
* to 0 to 31. 32 is reserved for the kernel driver.
*/
- priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+ priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+ + !!r->write_domain;
/* the first reloc of an UVD job is the msg and that must be in
- VRAM, also but everything into VRAM on AGP cards to avoid
- image corruptions */
+ VRAM, also put everything into VRAM on AGP cards and older
+ IGP chips to avoid image corruption */
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+ (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
+ p->rdev->family == CHIP_RS780 ||
+ p->rdev->family == CHIP_RS880)) {
+
/* TODO: is this still needed for NI+ ? */
p->relocs[i].prefered_domains =
RADEON_GEM_DOMAIN_VRAM;
@@ -164,6 +169,19 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].allowed_domains = domain;
}
+ if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
+ uint32_t domain = p->relocs[i].prefered_domains;
+ if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
+ DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
+ "allowed for userptr BOs\n");
+ return -EINVAL;
+ }
+ need_mmap_lock = true;
+ domain = RADEON_GEM_DOMAIN_GTT;
+ p->relocs[i].prefered_domains = domain;
+ p->relocs[i].allowed_domains = domain;
+ }
+
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].handle = r->handle;
@@ -176,8 +194,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (p->cs_flags & RADEON_CS_USE_VM)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
+ if (need_mmap_lock)
+ down_read(&current->mm->mmap_sem);
+
+ r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+
+ if (need_mmap_lock)
+ up_read(&current->mm->mmap_sem);
- return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+ return r;
}
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -228,11 +253,17 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
int i;
for (i = 0; i < p->nrelocs; i++) {
+ struct reservation_object *resv;
+ struct fence *fence;
+
if (!p->relocs[i].robj)
continue;
+ resv = p->relocs[i].robj->tbo.resv;
+ fence = reservation_object_get_excl(resv);
+
radeon_semaphore_sync_to(p->ib.semaphore,
- p->relocs[i].robj->tbo.sync_obj);
+ (struct radeon_fence *)fence);
}
}
@@ -402,7 +433,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
- parser->ib.fence);
+ &parser->ib.fence->base);
} else if (backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
@@ -450,7 +481,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
radeon_vce_note_usage(rdev);
radeon_cs_sync_rings(parser);
- r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
@@ -541,9 +572,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
- r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+ r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
} else {
- r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
}
out:
@@ -628,6 +659,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
up_read(&rdev->exclusive_lock);
return -EBUSY;
}
+ if (rdev->in_reset) {
+ up_read(&rdev->exclusive_lock);
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ return r;
+ }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c8ea050c8fa4..e84a76e6656a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1253,6 +1253,7 @@ int radeon_device_init(struct radeon_device *rdev,
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
+ rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1270,6 +1271,8 @@ int radeon_device_init(struct radeon_device *rdev,
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
+ mutex_init(&rdev->mn_lock);
+ hash_init(rdev->mn_hash);
r = radeon_gem_init(rdev);
if (r)
return r;
@@ -1395,10 +1398,6 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
- r = radeon_ib_ring_tests(rdev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
-
r = radeon_gem_debugfs_init(rdev);
if (r) {
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -1416,6 +1415,10 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
}
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
@@ -1486,7 +1489,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
- bool force_completion = false;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -1530,12 +1532,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
r = radeon_fence_wait_empty(rdev, i);
if (r) {
/* delay GPU reset to resume */
- force_completion = true;
+ radeon_fence_driver_force_completion(rdev, i);
}
}
- if (force_completion) {
- radeon_fence_driver_force_completion(rdev);
- }
radeon_save_bios_scratch_regs(rdev);
@@ -1675,13 +1674,11 @@ int radeon_gpu_reset(struct radeon_device *rdev)
return 0;
}
- rdev->needs_reset = false;
-
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
- radeon_pm_suspend(rdev);
radeon_suspend(rdev);
+ radeon_hpd_fini(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1693,7 +1690,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
}
}
-retry:
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
@@ -1702,40 +1698,69 @@ retry:
radeon_restore_bios_scratch_regs(rdev);
- if (!r) {
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!r && ring_data[i]) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
- ring_sizes[i] = 0;
- ring_data[i] = NULL;
+ } else {
+ radeon_fence_driver_force_completion(rdev, i);
+ kfree(ring_data[i]);
}
+ }
- r = radeon_ib_ring_tests(rdev);
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ /* do dpm late init */
+ r = radeon_pm_late_init(rdev);
if (r) {
- dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
- if (saved) {
- saved = false;
- radeon_suspend(rdev);
- goto retry;
- }
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
} else {
- radeon_fence_driver_force_completion(rdev);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- kfree(ring_data[i]);
+ /* resume old pm late */
+ radeon_pm_resume(rdev);
+ }
+
+ /* init dig PHYs, disp eng pll */
+ if (rdev->is_atom_bios) {
+ radeon_atom_encoder_init(rdev);
+ radeon_atom_disp_eng_pll_init(rdev);
+ /* turn on the BL */
+ if (rdev->mode_info.bl_encoder) {
+ u8 bl_level = radeon_get_backlight_level(rdev,
+ rdev->mode_info.bl_encoder);
+ radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+ bl_level);
}
}
+ /* reset hpd state */
+ radeon_hpd_init(rdev);
+
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+
+ rdev->in_reset = true;
+ rdev->needs_reset = false;
+
+ downgrade_write(&rdev->exclusive_lock);
- radeon_pm_resume(rdev);
drm_helper_resume_force_mode(rdev->ddev);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
- if (r) {
+ /* set the power state here in case we are a PX system or headless */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+ radeon_pm_compute_clocks(rdev);
+
+ if (!r) {
+ r = radeon_ib_ring_tests(rdev);
+ if (r && saved)
+ r = -EAGAIN;
+ } else {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
}
- up_write(&rdev->exclusive_lock);
+ rdev->needs_reset = r == -EAGAIN;
+ rdev->in_reset = false;
+
+ up_read(&rdev->exclusive_lock);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index fd8cd0c3600f..4eb37976f879 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -405,7 +405,9 @@ static void radeon_flip_work_func(struct work_struct *__work)
r = radeon_fence_wait(work->fence, false);
if (r == -EDEADLK) {
up_read(&rdev->exclusive_lock);
- r = radeon_gpu_reset(rdev);
+ do {
+ r = radeon_gpu_reset(rdev);
+ } while (r == -EAGAIN);
down_read(&rdev->exclusive_lock);
}
if (r)
@@ -474,11 +476,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
obj = new_radeon_fb->obj;
new_rbo = gem_to_radeon_bo(obj);
- spin_lock(&new_rbo->tbo.bdev->fence_lock);
- if (new_rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
- spin_unlock(&new_rbo->tbo.bdev->fence_lock);
-
/* pin the new buffer */
DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
work->old_rbo, new_rbo);
@@ -497,6 +494,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
+ work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -580,7 +578,6 @@ cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
-
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a773830c6c40..ec7e963d9bf7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -114,6 +114,9 @@ int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gobj,
+ int flags);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
unsigned int flags,
int *vpos, int *hpos, ktime_t *stime,
@@ -134,6 +137,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sg);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -179,6 +183,7 @@ int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
+int radeon_bapm = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -258,6 +263,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);
MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(bapm, radeon_bapm, int, 0444);
+
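Taken together with the kv_dpm.c hunk above, only an explicit 0 disables BAPM on Kaveri-class parts; both 1 and the -1 auto default leave it enabled there. A boot-time example (kernel command line):

    radeon.bapm=0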
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@@ -320,6 +328,7 @@ static struct drm_driver driver_old = {
.preclose = radeon_driver_preclose,
.postclose = radeon_driver_postclose,
.lastclose = radeon_driver_lastclose,
+ .set_busid = drm_pci_set_busid,
.unload = radeon_driver_unload,
.suspend = radeon_suspend,
.resume = radeon_resume,
@@ -543,6 +552,7 @@ static struct drm_driver kms_driver = {
.preclose = radeon_driver_preclose_kms,
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
+ .set_busid = drm_pci_set_busid,
.unload = radeon_driver_unload_kms,
.get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms,
@@ -568,10 +578,11 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_export = radeon_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = radeon_gem_prime_pin,
.gem_prime_unpin = radeon_gem_prime_unpin,
+ .gem_prime_res_obj = radeon_gem_prime_res_obj,
.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
.gem_prime_vmap = radeon_gem_prime_vmap,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 913787085dfa..af9f2d6bd7d0 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -98,6 +98,25 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
}
/**
+ * radeon_fence_schedule_check - schedule lockup check
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index we should work with
+ *
+ * Queues a delayed work item to check for lockups.
+ */
+static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
+{
+ /*
+ * Do not reset the timer here with mod_delayed_work,
+ * this can livelock in an interaction with TTM delayed destroy.
+ */
+ queue_delayed_work(system_power_efficient_wq,
+ &rdev->fence_drv[ring].lockup_work,
+ RADEON_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
* radeon_fence_emit - emit a fence on the requested ring
*
* @rdev: radeon_device pointer
@@ -111,30 +130,70 @@ int radeon_fence_emit(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
+ u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];
+
/* we are protected by the ring emission mutex */
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
- kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
+ (*fence)->seq = seq;
(*fence)->ring = ring;
+ fence_init(&(*fence)->base, &radeon_fence_ops,
+ &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
+ radeon_fence_schedule_check(rdev, ring);
return 0;
}
/**
- * radeon_fence_process - process a fence
+ * radeon_fence_check_signaled - callback from fence_queue
+ *
+ * this function is called with fence_queue lock held, which is also used
+ * for the fence locking itself, so unlocked variants are used for
+ * fence_signal, and remove_wait_queue.
+ */
+static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+{
+ struct radeon_fence *fence;
+ u64 seq;
+
+ fence = container_of(wait, struct radeon_fence, fence_wake);
+
+ /*
+ * We cannot use radeon_fence_process here because we're already
+ * in the waitqueue, in a call from wake_up_all.
+ */
+ seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
+ if (seq >= fence->seq) {
+ int ret = fence_signal_locked(&fence->base);
+
+ if (!ret)
+ FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ else
+ FENCE_TRACE(&fence->base, "was already signaled\n");
+
+ radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
+ __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
+ fence_put(&fence->base);
+ } else
+ FENCE_TRACE(&fence->base, "pending\n");
+ return 0;
+}
+
+/**
+ * radeon_fence_activity - check for fence activity
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks the current fence value and calculates the last
+ * signaled fence value. Returns true if activity occurred
+ * on the ring and the fence_queue should be woken up.
*/
-void radeon_fence_process(struct radeon_device *rdev, int ring)
+static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
@@ -190,23 +249,77 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
- if (wake)
- wake_up_all(&rdev->fence_queue);
+ if (seq < last_emitted)
+ radeon_fence_schedule_check(rdev, ring);
+
+ return wake;
}
/**
- * radeon_fence_destroy - destroy a fence
+ * radeon_fence_check_lockup - check for hardware lockup
*
- * @kref: fence kref
+ * @work: delayed work item
*
- * Frees the fence object (all asics).
+ * Checks for fence activity and, if there is none, probes
+ * the hardware to see whether a lockup occurred.
*/
-static void radeon_fence_destroy(struct kref *kref)
+static void radeon_fence_check_lockup(struct work_struct *work)
{
- struct radeon_fence *fence;
+ struct radeon_fence_driver *fence_drv;
+ struct radeon_device *rdev;
+ int ring;
+
+ fence_drv = container_of(work, struct radeon_fence_driver,
+ lockup_work.work);
+ rdev = fence_drv->rdev;
+ ring = fence_drv - &rdev->fence_drv[0];
+
+ if (!down_read_trylock(&rdev->exclusive_lock)) {
+ /* just reschedule the check if a reset is going on */
+ radeon_fence_schedule_check(rdev, ring);
+ return;
+ }
+
+ if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
+ unsigned long irqflags;
+
+ fence_drv->delayed_irq = false;
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ }
+
+ if (radeon_fence_activity(rdev, ring))
+ wake_up_all(&rdev->fence_queue);
- fence = container_of(kref, struct radeon_fence, kref);
- kfree(fence);
+ else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (current fence id "
+ "0x%016llx last fence id 0x%016llx on ring %d)\n",
+ (uint64_t)atomic64_read(&fence_drv->last_seq),
+ fence_drv->sync_seq[ring], ring);
+
+ /* remember that we need a reset */
+ rdev->needs_reset = true;
+ wake_up_all(&rdev->fence_queue);
+ }
+ up_read(&rdev->exclusive_lock);
+}
+
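Assumption: the lockup_work member added in the radeon.h hunk above is armed per ring in the fence driver init path, which is outside this hunk. A sketch of that wiring:

/* Hypothetical init helper showing how the delayed work is set up. */
static void example_fence_drv_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].rdev = rdev;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
}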
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+ if (radeon_fence_activity(rdev, ring))
+ wake_up_all(&rdev->fence_queue);
}
/**
@@ -237,6 +350,75 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
+static bool radeon_fence_is_signaled(struct fence *f)
+{
+ struct radeon_fence *fence = to_radeon_fence(f);
+ struct radeon_device *rdev = fence->rdev;
+ unsigned ring = fence->ring;
+ u64 seq = fence->seq;
+
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+ return true;
+ }
+
+ if (down_read_trylock(&rdev->exclusive_lock)) {
+ radeon_fence_process(rdev, ring);
+ up_read(&rdev->exclusive_lock);
+
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * radeon_fence_enable_signaling - enable signaling on fence
+ * @fence: fence
+ *
+ * This function is called with fence_queue lock held, and adds a callback
+ * to fence_queue that checks if this fence is signaled, and if so it
+ * signals the fence and removes itself.
+ */
+static bool radeon_fence_enable_signaling(struct fence *f)
+{
+ struct radeon_fence *fence = to_radeon_fence(f);
+ struct radeon_device *rdev = fence->rdev;
+
+ if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
+ return false;
+
+ if (down_read_trylock(&rdev->exclusive_lock)) {
+ radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+
+ if (radeon_fence_activity(rdev, fence->ring))
+ wake_up_all_locked(&rdev->fence_queue);
+
+ /* did fence get signaled after we enabled the sw irq? */
+ if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
+ radeon_irq_kms_sw_irq_put(rdev, fence->ring);
+ up_read(&rdev->exclusive_lock);
+ return false;
+ }
+
+ up_read(&rdev->exclusive_lock);
+ } else {
+ /* we're probably in a lockup, let's not fiddle too much */
+ if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
+ rdev->fence_drv[fence->ring].delayed_irq = true;
+ radeon_fence_schedule_check(rdev, fence->ring);
+ }
+
+ fence->fence_wake.flags = 0;
+ fence->fence_wake.private = NULL;
+ fence->fence_wake.func = radeon_fence_check_signaled;
+ __add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
+ fence_get(f);
+
+ FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+ return true;
+}
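/*
 * Illustration (not part of the patch): with .enable_signaling wired up, any
 * user of the shared fence API can now wait on a radeon fence without knowing
 * radeon internals. A minimal asynchronous consumer, assuming the 3.17-era
 * <linux/fence.h> interface, might look like:
 *
 *	static void example_cb(struct fence *f, struct fence_cb *cb)
 *	{
 *		pr_info("fence signaled\n");
 *	}
 *
 *	r = fence_add_callback(f, &my_cb, example_cb);
 *	if (r == -ENOENT)
 *		... fence was already signaled, callback not installed ...
 *
 * fence_add_callback() is what ends up invoking
 * radeon_fence_enable_signaling() above.
 */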
+
/**
* radeon_fence_signaled - check if a fence has signaled
*
@@ -247,14 +429,15 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
*/
bool radeon_fence_signaled(struct radeon_fence *fence)
{
- if (!fence) {
+ if (!fence)
return true;
- }
- if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
- return true;
- }
+
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
- fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ int ret;
+
+ ret = fence_signal(&fence->base);
+ if (!ret)
+ FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
return true;
}
return false;
@@ -283,110 +466,70 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
}
/**
- * radeon_fence_wait_seq - wait for a specific sequence numbers
+ * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
*
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptible sleep
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait_*().
- * Returns 0 if the sequence number has passed, error for all other cases.
+ * Returns remaining time if the sequence number has passed, 0 when
+ * the wait timed out, or an error for all other cases.
* -EDEADLK is returned when a GPU lockup has been detected.
*/
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
- bool intr)
+static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
+ u64 *target_seq, bool intr,
+ long timeout)
{
- uint64_t last_seq[RADEON_NUM_RINGS];
- bool signaled;
- int i, r;
-
- while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+ long r;
+ int i;
- /* Save current sequence values, used to check for GPU lockups */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i])
- continue;
+ if (radeon_fence_any_seq_signaled(rdev, target_seq))
+ return timeout;
- last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
- trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
- radeon_irq_kms_sw_irq_get(rdev, i);
- }
+ /* enable IRQs and tracing */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
- if (intr) {
- r = wait_event_interruptible_timeout(rdev->fence_queue, (
- (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
- || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
- } else {
- r = wait_event_timeout(rdev->fence_queue, (
- (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
- || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
- }
+ trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
+ radeon_irq_kms_sw_irq_get(rdev, i);
+ }
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i])
- continue;
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue, (
+ radeon_fence_any_seq_signaled(rdev, target_seq)
+ || rdev->needs_reset), timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue, (
+ radeon_fence_any_seq_signaled(rdev, target_seq)
+ || rdev->needs_reset), timeout);
+ }
- radeon_irq_kms_sw_irq_put(rdev, i);
- trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
- }
+ if (rdev->needs_reset)
+ r = -EDEADLK;
- if (unlikely(r < 0))
- return r;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i])
+ continue;
- if (unlikely(!signaled)) {
- if (rdev->needs_reset)
- return -EDEADLK;
-
- /* we were interrupted for some reason and fence
- * isn't signaled yet, resume waiting */
- if (r)
- continue;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i])
- continue;
-
- if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
- break;
- }
-
- if (i != RADEON_NUM_RINGS)
- continue;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i])
- continue;
-
- if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
- break;
- }
-
- if (i < RADEON_NUM_RINGS) {
- /* good news we believe it's a lockup */
- dev_warn(rdev->dev, "GPU lockup (waiting for "
- "0x%016llx last fence id 0x%016llx on"
- " ring %d)\n",
- target_seq[i], last_seq[i], i);
-
- /* remember that we need an reset */
- rdev->needs_reset = true;
- wake_up_all(&rdev->fence_queue);
- return -EDEADLK;
- }
- }
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
}
- return 0;
+
+ return r;
}
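/*
 * Illustration (not part of the patch): the long return value follows the
 * wait_event_*timeout() convention that the callers below depend on:
 *
 *	r > 0	condition became true with r jiffies left
 *	r == 0	timed out, condition still false
 *	r < 0	-ERESTARTSYS for an interrupted interruptible wait, or
 *		-EDEADLK substituted here when a GPU reset is required
 */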
/**
* radeon_fence_wait - wait for a fence to signal
*
* @fence: radeon fence object
- * @intr: use interruptable sleep
+ * @intr: use interruptible sleep
*
* Wait for the requested fence to signal (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
@@ -396,22 +539,17 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
- int r;
-
- if (fence == NULL) {
- WARN(1, "Querying an invalid fence : %p !\n", fence);
- return -EINVAL;
- }
+ long r;
seq[fence->ring] = fence->seq;
- if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
- return 0;
-
- r = radeon_fence_wait_seq(fence->rdev, seq, intr);
- if (r)
+ r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
return r;
+ }
- fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ r = fence_signal(&fence->base);
+ if (!r)
+ FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return 0;
}
@@ -434,7 +572,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
{
uint64_t seq[RADEON_NUM_RINGS];
unsigned i, num_rings = 0;
- int r;
+ long r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
seq[i] = 0;
@@ -445,18 +583,14 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
seq[i] = fences[i]->seq;
++num_rings;
-
- /* test if something was allready signaled */
- if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
- return 0;
}
/* nothing to wait for ? */
if (num_rings == 0)
return -ENOENT;
- r = radeon_fence_wait_seq(rdev, seq, intr);
- if (r) {
+ r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
return r;
}
return 0;
@@ -475,6 +609,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
+ long r;
seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
@@ -482,7 +617,10 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
already the last emitted fence */
return -ENOENT;
}
- return radeon_fence_wait_seq(rdev, seq, false);
+ r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
+ return 0;
}
/**
@@ -498,18 +636,18 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
- int r;
+ long r;
seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
if (!seq[ring])
return 0;
- r = radeon_fence_wait_seq(rdev, seq, false);
- if (r) {
+ r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
if (r == -EDEADLK)
return -EDEADLK;
- dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+ dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
ring, r);
}
return 0;
@@ -525,7 +663,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
- kref_get(&fence->kref);
+ fence_get(&fence->base);
return fence;
}
@@ -542,7 +680,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
- kref_put(&tmp->kref, radeon_fence_destroy);
+ fence_put(&tmp->base);
}
}
@@ -711,6 +849,9 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].initialized = false;
+ INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
+ radeon_fence_check_lockup);
+ rdev->fence_drv[ring].rdev = rdev;
}
/**
@@ -758,8 +899,9 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
r = radeon_fence_wait_empty(rdev, ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
- radeon_fence_driver_force_completion(rdev);
+ radeon_fence_driver_force_completion(rdev, ring);
}
+ cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
@@ -771,18 +913,16 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
* radeon_fence_driver_force_completion - force all fence waiters to complete
*
* @rdev: radeon device pointer
+ * @ring: the ring to complete
*
* In case of GPU reset failure make sure no process keeps waiting on a fence
* that will never complete.
*/
-void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
- int ring;
-
- for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
- if (!rdev->fence_drv[ring].initialized)
- continue;
+ if (rdev->fence_drv[ring].initialized) {
radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+ cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
}
}
@@ -833,6 +973,7 @@ static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
down_read(&rdev->exclusive_lock);
seq_printf(m, "%d\n", rdev->needs_reset);
rdev->needs_reset = true;
+ wake_up_all(&rdev->fence_queue);
up_read(&rdev->exclusive_lock);
return 0;
@@ -852,3 +993,72 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
return 0;
#endif
}
+
+static const char *radeon_fence_get_driver_name(struct fence *fence)
+{
+ return "radeon";
+}
+
+static const char *radeon_fence_get_timeline_name(struct fence *f)
+{
+ struct radeon_fence *fence = to_radeon_fence(f);
+ switch (fence->ring) {
+ case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
+ case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
+ case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
+ case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
+ case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
+ case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
+ case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
+ case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
+ default: WARN_ON_ONCE(1); return "radeon.unk";
+ }
+}
+
+static inline bool radeon_test_signaled(struct radeon_fence *fence)
+{
+ return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+}
+
+static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+ signed long t)
+{
+ struct radeon_fence *fence = to_radeon_fence(f);
+ struct radeon_device *rdev = fence->rdev;
+ bool signaled;
+
+ fence_enable_sw_signaling(&fence->base);
+
+ /*
+ * This function has to return -EDEADLK, but cannot hold
+ * exclusive_lock during the wait because some callers
+ * may already hold it. This means checking needs_reset without
+ * lock, and not fiddling with any gpu internals.
+ *
+ * The callback installed with fence_enable_sw_signaling will
+ * run before our wait_event_*timeout call, so we will see
+ * both the signaled fence and the changes to needs_reset.
+ */
+
+ if (intr)
+ t = wait_event_interruptible_timeout(rdev->fence_queue,
+ ((signaled = radeon_test_signaled(fence)) ||
+ rdev->needs_reset), t);
+ else
+ t = wait_event_timeout(rdev->fence_queue,
+ ((signaled = radeon_test_signaled(fence)) ||
+ rdev->needs_reset), t);
+
+ if (t > 0 && !signaled)
+ return -EDEADLK;
+ return t;
+}
+
+const struct fence_ops radeon_fence_ops = {
+ .get_driver_name = radeon_fence_get_driver_name,
+ .get_timeline_name = radeon_fence_get_timeline_name,
+ .enable_signaling = radeon_fence_enable_signaling,
+ .signaled = radeon_fence_is_signaled,
+ .wait = radeon_fence_default_wait,
+ .release = NULL,
+};
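/*
 * Illustration (not part of the patch): these ops get attached when a fence
 * is emitted; elsewhere in this series radeon_fence_emit() initializes the
 * shared base object roughly as
 *
 *	fence_init(&fence->base, &radeon_fence_ops,
 *		   &rdev->fence_queue.lock,
 *		   rdev->fence_context + ring, seq);
 *
 * so the fence_queue spinlock doubles as the fence lock protecting the
 * callback list, which is why enable_signaling() above may use
 * wake_up_all_locked() and __add_wait_queue().
 */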
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index bfd7e1b0ff3f..4b7c8ec36c2f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -94,7 +94,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
{
struct radeon_bo *robj;
uint32_t domain;
- int r;
+ long r;
/* FIXME: reimplement */
robj = gem_to_radeon_bo(gobj);
@@ -110,9 +110,12 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = radeon_bo_wait(robj, NULL, false);
- if (r) {
- printk(KERN_ERR "Failed to wait for object !\n");
+ r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ if (!r)
+ r = -EBUSY;
+
+ if (r < 0 && r != -EINTR) {
+ printk(KERN_ERR "Failed to wait for object: %li\n", r);
return r;
}
}
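/*
 * Illustration (not part of the patch): reservation_object_wait_timeout_rcu()
 * uses the same convention as the fence waits above (remaining jiffies on
 * success, 0 on timeout), so 0 is remapped to -EBUSY before the error check,
 * and -EINTR is excluded from the print because it only means the ioctl
 * should be restarted.
 */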
@@ -272,6 +275,94 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_userptr *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *bo;
+ uint32_t handle;
+ int r;
+
+ if (offset_in_page(args->addr | args->size))
+ return -EINVAL;
+
+ /* reject unknown flag values */
+ if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
+ RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
+ RADEON_GEM_USERPTR_REGISTER))
+ return -EINVAL;
+
+ if (args->flags & RADEON_GEM_USERPTR_READONLY) {
+ /* readonly pages not tested on older hardware */
+ if (rdev->family < CHIP_R600)
+ return -EINVAL;
+
+ } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
+ !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
+
+ /* if we want to write to it we must require anonymous
+ memory and install an MMU notifier */
+ return -EACCES;
+ }
+
+ down_read(&rdev->exclusive_lock);
+
+ /* create a gem object to contain this object in */
+ r = radeon_gem_object_create(rdev, args->size, 0,
+ RADEON_GEM_DOMAIN_CPU, 0,
+ false, &gobj);
+ if (r)
+ goto handle_lockup;
+
+ bo = gem_to_radeon_bo(gobj);
+ r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+ if (r)
+ goto release_object;
+
+ if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
+ r = radeon_mn_register(bo, args->addr);
+ if (r)
+ goto release_object;
+ }
+
+ if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
+ down_read(&current->mm->mmap_sem);
+ r = radeon_bo_reserve(bo, true);
+ if (r) {
+ up_read(&current->mm->mmap_sem);
+ goto release_object;
+ }
+
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ radeon_bo_unreserve(bo);
+ up_read(&current->mm->mmap_sem);
+ if (r)
+ goto release_object;
+ }
+
+ r = drm_gem_handle_create(filp, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
+ if (r)
+ goto handle_lockup;
+
+ args->handle = handle;
+ up_read(&rdev->exclusive_lock);
+ return 0;
+
+release_object:
+ drm_gem_object_unreference_unlocked(gobj);
+
+handle_lockup:
+ up_read(&rdev->exclusive_lock);
+ r = radeon_gem_handle_lockup(rdev, r);
+
+ return r;
+}
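/*
 * Illustration (not part of the patch): a userspace caller could wrap a
 * page-aligned anonymous buffer roughly like this (libdrm-style, field names
 * per the new drm_radeon_gem_userptr uapi struct):
 *
 *	struct drm_radeon_gem_userptr args = {
 *		.addr = (uintptr_t)buf,		// page aligned
 *		.size = size,			// multiple of page size
 *		.flags = RADEON_GEM_USERPTR_ANONONLY |
 *			 RADEON_GEM_USERPTR_REGISTER |
 *			 RADEON_GEM_USERPTR_VALIDATE,
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
 *				&args, sizeof(args)) == 0)
 *		handle = args.handle;	// GEM handle for the user pages
 *
 * Writable mappings must pass ANONONLY and REGISTER, per the checks above.
 */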
+
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -315,6 +406,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
+ if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
+ drm_gem_object_unreference_unlocked(gobj);
+ return -EPERM;
+ }
*offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
@@ -357,15 +452,22 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- int r;
+ int r = 0;
uint32_t cur_placement = 0;
+ long ret;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, &cur_placement, false);
+
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ if (ret == 0)
+ r = -EBUSY;
+ else if (ret < 0)
+ r = ret;
+
/* Flush HDP cache via MMIO if necessary */
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
@@ -532,6 +634,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
+
+ r = -EPERM;
+ if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
+ goto out;
+
r = radeon_bo_reserve(robj, false);
if (unlikely(r))
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 65b0c213488d..6fc7461d70c4 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
* @rdev: radeon_device pointer
* @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only)
+ * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
* to SI there was just a DE IB.
*/
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib)
+ struct radeon_ib *const_ib, bool hdp_flush)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
int r = 0;
@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (ib->vm)
radeon_vm_fence(rdev, ib->vm, ib->fence);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, hdp_flush);
return 0;
}
@@ -268,6 +269,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
r = radeon_ib_test(rdev, i, ring);
if (r) {
+ radeon_fence_driver_force_completion(rdev, i);
ring->ready = false;
rdev->needs_reset = false;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 16807afab362..7784911d78ef 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -88,23 +88,6 @@ static void radeon_hotplug_work_func(struct work_struct *work)
}
/**
- * radeon_irq_reset_work_func - execute gpu reset
- *
- * @work: work struct
- *
- * Execute scheduled gpu reset (cayman+).
- * This function is called when the irq handler
- * thinks we need a gpu reset.
- */
-static void radeon_irq_reset_work_func(struct work_struct *work)
-{
- struct radeon_device *rdev = container_of(work, struct radeon_device,
- reset_work);
-
- radeon_gpu_reset(rdev);
-}
-
-/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
* @dev: drm dev pointer
@@ -284,7 +267,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
- INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
@@ -342,6 +324,21 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
}
/**
+ * radeon_irq_kms_sw_irq_get_delayed - enable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Requests the software interrupt for a specific ring (all asics)
+ * without committing the enable to the hardware. The software
+ * interrupt is generally used to signal a fence on a particular ring;
+ * returns true if the caller still needs to program the interrupt.
+ */
+bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
+{
+ return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
+}
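/*
 * Illustration (not part of the patch): unlike radeon_irq_kms_sw_irq_get(),
 * this variant only bumps the reference count and never touches the
 * hardware, so it is safe to call from the fence code even when the
 * exclusive lock is unavailable. When it returns true the caller sets
 * fence_drv->delayed_irq, and radeon_fence_check_lockup() later commits the
 * enable with radeon_irq_set() once it can take the lock.
 */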
+
+/**
* radeon_irq_kms_sw_irq_put - disable software interrupt
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index eb7164d07985..8309b11e674d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -885,5 +885,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
new file mode 100644
index 000000000000..a69bd441dd2d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/mmu_notifier.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+
+struct radeon_mn {
+ /* constant after initialisation */
+ struct radeon_device *rdev;
+ struct mm_struct *mm;
+ struct mmu_notifier mn;
+
+ /* only used on destruction */
+ struct work_struct work;
+
+ /* protected by rdev->mn_lock */
+ struct hlist_node node;
+
+ /* objects protected by lock */
+ struct mutex lock;
+ struct rb_root objects;
+};
+
+/**
+ * radeon_mn_destroy - destroy the rmn
+ *
+ * @work: previously scheduled work item
+ *
+ * Lazily destroys the notifier from a work item.
+ */
+static void radeon_mn_destroy(struct work_struct *work)
+{
+ struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
+ struct radeon_device *rdev = rmn->rdev;
+ struct radeon_bo *bo, *next;
+
+ mutex_lock(&rdev->mn_lock);
+ mutex_lock(&rmn->lock);
+ hash_del(&rmn->node);
+ rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
+ interval_tree_remove(&bo->mn_it, &rmn->objects);
+ bo->mn = NULL;
+ }
+ mutex_unlock(&rmn->lock);
+ mutex_unlock(&rdev->mn_lock);
+ mmu_notifier_unregister(&rmn->mn, rmn->mm);
+ kfree(rmn);
+}
+
+/**
+ * radeon_mn_release - callback to notify about mm destruction
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ *
+ * Schedule a work item to lazily destroy our notifier.
+ */
+static void radeon_mn_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ INIT_WORK(&rmn->work, radeon_mn_destroy);
+ schedule_work(&rmn->work);
+}
+
+/**
+ * radeon_mn_invalidate_range_start - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * We block for all BOs between start and end to be idle and
+ * unmap them by moving them into the system domain again.
+ */
+static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ struct interval_tree_node *it;
+
+ /* notification is exclusive, but interval is inclusive */
+ end -= 1;
+
+ mutex_lock(&rmn->lock);
+
+ it = interval_tree_iter_first(&rmn->objects, start, end);
+ while (it) {
+ struct radeon_bo *bo;
+ struct fence *fence;
+ int r;
+
+ bo = container_of(it, struct radeon_bo, mn_it);
+ it = interval_tree_iter_next(it, start, end);
+
+ r = radeon_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%d) failed to reserve user bo\n", r);
+ continue;
+ }
+
+ fence = reservation_object_get_excl(bo->tbo.resv);
+ if (fence) {
+ r = radeon_fence_wait((struct radeon_fence *)fence, false);
+ if (r)
+ DRM_ERROR("(%d) failed to wait for user bo\n", r);
+ }
+
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (r)
+ DRM_ERROR("(%d) failed to validate user bo\n", r);
+
+ radeon_bo_unreserve(bo);
+ }
+
+ mutex_unlock(&rmn->lock);
+}
+
+static const struct mmu_notifier_ops radeon_mn_ops = {
+ .release = radeon_mn_release,
+ .invalidate_range_start = radeon_mn_invalidate_range_start,
+};
+
+/**
+ * radeon_mn_get - create notifier context
+ *
+ * @rdev: radeon device pointer
+ *
+ * Creates a notifier context for current->mm.
+ */
+static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
+{
+ struct mm_struct *mm = current->mm;
+ struct radeon_mn *rmn;
+ int r;
+
+ down_write(&mm->mmap_sem);
+ mutex_lock(&rdev->mn_lock);
+
+ hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
+ if (rmn->mm == mm)
+ goto release_locks;
+
+ rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
+ if (!rmn) {
+ rmn = ERR_PTR(-ENOMEM);
+ goto release_locks;
+ }
+
+ rmn->rdev = rdev;
+ rmn->mm = mm;
+ rmn->mn.ops = &radeon_mn_ops;
+ mutex_init(&rmn->lock);
+ rmn->objects = RB_ROOT;
+
+ r = __mmu_notifier_register(&rmn->mn, mm);
+ if (r)
+ goto free_rmn;
+
+ hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);
+
+release_locks:
+ mutex_unlock(&rdev->mn_lock);
+ up_write(&mm->mmap_sem);
+
+ return rmn;
+
+free_rmn:
+ mutex_unlock(&rdev->mn_lock);
+ up_write(&mm->mmap_sem);
+ kfree(rmn);
+
+ return ERR_PTR(r);
+}
+
+/**
+ * radeon_mn_register - register a BO for notifier updates
+ *
+ * @bo: radeon buffer object
+ * @addr: userptr addr we should monitor
+ *
+ * Registers an MMU notifier for the given BO at the specified address.
+ * Returns 0 on success, -ERRNO if anything goes wrong.
+ */
+int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
+{
+ unsigned long end = addr + radeon_bo_size(bo) - 1;
+ struct radeon_device *rdev = bo->rdev;
+ struct radeon_mn *rmn;
+ struct interval_tree_node *it;
+
+ rmn = radeon_mn_get(rdev);
+ if (IS_ERR(rmn))
+ return PTR_ERR(rmn);
+
+ mutex_lock(&rmn->lock);
+
+ it = interval_tree_iter_first(&rmn->objects, addr, end);
+ if (it) {
+ mutex_unlock(&rmn->lock);
+ return -EEXIST;
+ }
+
+ bo->mn = rmn;
+ bo->mn_it.start = addr;
+ bo->mn_it.last = end;
+ interval_tree_insert(&bo->mn_it, &rmn->objects);
+
+ mutex_unlock(&rmn->lock);
+
+ return 0;
+}
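/*
 * Illustration (not part of the patch): registration is driven by the
 * RADEON_GEM_USERPTR_REGISTER flag in the userptr ioctl, and the matching
 * radeon_mn_unregister() happens implicitly when the BO is destroyed in
 * radeon_ttm_bo_destroy(), so a BO is tracked for its whole lifetime:
 *
 *	r = radeon_mn_register(bo, args->addr);
 *	...
 *	radeon_mn_unregister(bo);
 */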
+
+/**
+ * radeon_mn_unregister - unregister a BO for notifier updates
+ *
+ * @bo: radeon buffer object
+ *
+ * Remove any registration of MMU notifier updates from the buffer object.
+ */
+void radeon_mn_unregister(struct radeon_bo *bo)
+{
+ struct radeon_device *rdev = bo->rdev;
+ struct radeon_mn *rmn;
+
+ mutex_lock(&rdev->mn_lock);
+ rmn = bo->mn;
+ if (rmn == NULL) {
+ mutex_unlock(&rdev->mn_lock);
+ return;
+ }
+
+ mutex_lock(&rmn->lock);
+ interval_tree_remove(&bo->mn_it, &rmn->objects);
+ bo->mn = NULL;
+ mutex_unlock(&rmn->lock);
+ mutex_unlock(&rdev->mn_lock);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 480c87d8edc5..aadbd36e64b9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,6 +75,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+ radeon_mn_unregister(bo);
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
@@ -96,40 +97,56 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
u32 c = 0, i;
- rbo->placement.fpfn = 0;
- rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+
if (domain & RADEON_GEM_DOMAIN_GTT) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_TT;
+
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
(rbo->rdev->flags & RADEON_IS_AGP)) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
} else {
- rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_TT;
}
}
+
if (domain & RADEON_GEM_DOMAIN_CPU) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_SYSTEM;
+
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
} else {
- rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+ rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_SYSTEM;
}
}
if (!c)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
+ for (i = 0; i < c; ++i) {
+ rbo->placements[i].fpfn = 0;
+ rbo->placements[i].lpfn = 0;
+ }
+
/*
* Use two-ended allocation depending on the buffer size to
* improve fragmentation quality.
@@ -137,7 +154,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
*/
if (rbo->tbo.mem.size > 512 * 1024) {
for (i = 0; i < c; i++) {
- rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
+ rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
}
}
}
@@ -264,6 +281,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
{
int r, i;
+ if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+ return -EPERM;
+
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
@@ -283,21 +303,22 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
- if (domain == RADEON_GEM_DOMAIN_VRAM) {
+ for (i = 0; i < bo->placement.num_placement; i++) {
+ unsigned lpfn = 0;
+
/* force to pin into visible video ram */
- bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
- }
- if (max_offset) {
- u64 lpfn = max_offset >> PAGE_SHIFT;
+ if (bo->placements[i].flags & TTM_PL_FLAG_VRAM)
+ lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ else
+ lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; /* ??? */
- if (!bo->placement.lpfn)
- bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+ if (max_offset)
+ lpfn = min(lpfn, (unsigned)(max_offset >> PAGE_SHIFT));
- if (lpfn < bo->placement.lpfn)
- bo->placement.lpfn = lpfn;
+ bo->placements[i].lpfn = lpfn;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
@@ -329,8 +350,10 @@ int radeon_bo_unpin(struct radeon_bo *bo)
bo->pin_count--;
if (bo->pin_count)
return 0;
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ for (i = 0; i < bo->placement.num_placement; i++) {
+ bo->placements[i].lpfn = 0;
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+ }
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
@@ -459,7 +482,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
- r = ttm_eu_reserve_buffers(ticket, head);
+ r = ttm_eu_reserve_buffers(ticket, head, true);
if (unlikely(r != 0)) {
return r;
}
@@ -468,6 +491,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
+ u32 allowed = lobj->allowed_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -479,7 +503,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
* into account. We don't want to disallow buffer moves
* completely.
*/
- if ((lobj->allowed_domains & current_domain) != 0 &&
+ if ((allowed & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
@@ -489,7 +513,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
retry:
radeon_ttm_placement_from_domain(bo, domain);
if (ring == R600_RING_TYPE_UVD_INDEX)
- radeon_uvd_force_into_uvd_segment(bo);
+ radeon_uvd_force_into_uvd_segment(bo, allowed);
initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
@@ -731,7 +755,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -755,12 +779,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0))
return r;
- spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
- if (bo->tbo.sync_obj)
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- spin_unlock(&bo->tbo.bdev->fence_lock);
+
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
ttm_bo_unreserve(&bo->tbo);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 7fd665a22908..32522cc940a1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
-
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
- /* Can't set dpm state when the card is off */
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
goto fail;
}
mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
+
+ /* Can't set dpm state when the card is off */
+ if (!(rdev->flags & RADEON_IS_PX) ||
+ (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+ radeon_pm_compute_clocks(rdev);
+
fail:
return count;
}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index f7e48d329db3..d5414d42e44b 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -103,3 +103,21 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
radeon_bo_unpin(bo);
radeon_bo_unreserve(bo);
}
+
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+
+ return bo->tbo.resv;
+}
+
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gobj,
+ int flags)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(gobj);
+ if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+ return ERR_PTR(-EPERM);
+ return drm_gem_prime_export(dev, gobj, flags);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5b4e0cf231a0..6f2a9bd6bb54 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -45,27 +45,6 @@
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
/**
- * radeon_ring_write - write a value to the ring
- *
- * @ring: radeon_ring structure holding ring information
- * @v: dword (dw) value to write
- *
- * Write a value to the requested ring buffer (all asics).
- */
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
- if (ring->count_dw <= 0) {
- DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
- }
-#endif
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
-}
-
-/**
* radeon_ring_supports_scratch_reg - check if the ring supports
* writing to scratch registers
*
@@ -177,16 +156,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+ bool hdp_flush)
{
/* If we are emitting the HDP flush via the ring buffer, we need to
* do it before padding.
*/
- if (rdev->asic->ring[ring->idx]->hdp_flush)
+ if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
@@ -196,7 +177,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
/* If we are emitting the HDP flush via MMIO, we need to do it after
* all CPU writes to VRAM finished.
*/
- if (rdev->asic->mmio_hdp_flush)
+ if (hdp_flush && rdev->asic->mmio_hdp_flush)
rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}
@@ -207,12 +188,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Call radeon_ring_commit() then unlock the ring (all asics).
*/
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+ bool hdp_flush)
{
- radeon_ring_commit(rdev, ring);
+ radeon_ring_commit(rdev, ring, hdp_flush);
mutex_unlock(&rdev->ring_lock);
}
@@ -372,7 +355,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, data[i]);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
kfree(data);
return 0;
}
@@ -400,9 +383,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
- (rdev->flags & RADEON_IS_PCIE) ?
- RADEON_GEM_GTT_WC : 0,
+ RADEON_GEM_DOMAIN_GTT, 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index dbd6bcde92de..56d9fd66d8ae 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
continue;
}
- radeon_ring_commit(rdev, &rdev->ring[i]);
+ radeon_ring_commit(rdev, &rdev->ring[i], false);
radeon_fence_note_sync(fence, ring);
semaphore->gpu_addr += 8;
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 23bb64fd775f..535403e0c8a2 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -30,9 +30,9 @@
*/
#include <drm/drmP.h>
-#include <drm/drm_buffer.h>
#include <drm/radeon_drm.h>
#include "radeon_drv.h"
+#include "drm_buffer.h"
/* ================================================================
* Helper functions for client state checking and fixup
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5adf4207453d..17bc3dced9f1 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
return r;
}
radeon_fence_emit(rdev, fence, ring->idx);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
return 0;
}
@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
if (r)
@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
if (r)
@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
+ radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_fence_wait(fence1, false);
if (r) {
@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
+ radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_fence_wait(fence2, false);
if (r) {
@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
if (r)
@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
+ radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
if (r)
goto out_cleanup;
@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringC);
+ radeon_ring_unlock_commit(rdev, ringC, false);
for (i = 0; i < 30; ++i) {
mdelay(100);
@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringC);
+ radeon_ring_unlock_commit(rdev, ringC, false);
mdelay(1000);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 72afe82a95c9..62d1f4d730a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,6 +39,8 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -176,12 +178,15 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void radeon_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ static struct ttm_place placements = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ };
+
struct radeon_bo *rbo;
- static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!radeon_ttm_bo_is_radeon_bo(bo)) {
- placement->fpfn = 0;
- placement->lpfn = 0;
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@@ -265,12 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */
- fence = bo->sync_obj;
+ fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv);
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
&fence);
/* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+ r = ttm_bo_move_accel_cleanup(bo, &fence->base,
evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
@@ -284,20 +289,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- u32 placements;
+ struct ttm_place placements;
struct ttm_placement placement;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- placement.fpfn = 0;
- placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
- placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.fpfn = 0;
+ placements.lpfn = 0;
+ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
@@ -332,19 +337,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
- u32 placements;
+ struct ttm_place placements;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- placement.fpfn = 0;
- placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
- placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.fpfn = 0;
+ placements.lpfn = 0;
+ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
@@ -483,39 +488,108 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
-{
- return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
-}
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct radeon_device *rdev;
+ u64 offset;
-static int radeon_sync_obj_flush(void *sync_obj)
+ uint64_t userptr;
+ struct mm_struct *usermm;
+ uint32_t userflags;
+};
+
+/* prepare the sg table with the user pages */
+static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
+ struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned pinned = 0, nents;
+ int r;
+
+ int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+ enum dma_data_direction direction = write ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ if (current->mm != gtt->usermm)
+ return -EPERM;
+
+ if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
+ /* check that we only pin down anonymous memory
+ to prevent problems with writeback */
+ unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+ struct vm_area_struct *vma;
+ vma = find_vma(gtt->usermm, gtt->userptr);
+ if (!vma || vma->vm_file || vma->vm_end < end)
+ return -EPERM;
+ }
+
+ do {
+ unsigned num_pages = ttm->num_pages - pinned;
+ uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+ struct page **pages = ttm->pages + pinned;
+
+ r = get_user_pages(current, current->mm, userptr, num_pages,
+ write, 0, pages, NULL);
+ if (r < 0)
+ goto release_pages;
+
+ pinned += r;
+
+ } while (pinned < ttm->num_pages);
+
+ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+ ttm->num_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (r)
+ goto release_sg;
+
+ r = -ENOMEM;
+ nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+ if (nents != ttm->sg->nents)
+ goto release_sg;
+
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ gtt->ttm.dma_address, ttm->num_pages);
+
return 0;
-}
-static void radeon_sync_obj_unref(void **sync_obj)
-{
- radeon_fence_unref((struct radeon_fence **)sync_obj);
-}
+release_sg:
+ kfree(ttm->sg);
-static void *radeon_sync_obj_ref(void *sync_obj)
-{
- return radeon_fence_ref((struct radeon_fence *)sync_obj);
+release_pages:
+ release_pages(ttm->pages, pinned, 0);
+ return r;
}
-static bool radeon_sync_obj_signaled(void *sync_obj)
+static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
- return radeon_fence_signaled((struct radeon_fence *)sync_obj);
-}
+ struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ struct scatterlist *sg;
+ int i;
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_tt {
- struct ttm_dma_tt ttm;
- struct radeon_device *rdev;
- u64 offset;
-};
+ int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+ enum dma_data_direction direction = write ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ /* free the sg table and pages again */
+ dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+ for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
+ struct page *page = sg_page(sg);
+
+ if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+
+ sg_free_table(ttm->sg);
+}
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
@@ -525,6 +599,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
RADEON_GART_PAGE_WRITE;
int r;
+ if (gtt->userptr) {
+ radeon_ttm_tt_pin_userptr(ttm);
+ flags &= ~RADEON_GART_PAGE_WRITE;
+ }
+
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -547,6 +626,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
struct radeon_ttm_tt *gtt = (void *)ttm;
radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+
+ if (gtt->userptr)
+ radeon_ttm_tt_unpin_userptr(ttm);
+
return 0;
}
@@ -603,6 +686,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
if (ttm->state != tt_unpopulated)
return 0;
+ if (gtt->userptr) {
+ ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+ if (!ttm->sg)
+ return -ENOMEM;
+
+ ttm->page_flags |= TTM_PAGE_FLAG_SG;
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
@@ -652,6 +745,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ if (gtt->userptr) {
+ kfree(ttm->sg);
+ ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+ return;
+ }
+
if (slave)
return;
@@ -680,6 +779,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
ttm_pool_unpopulate(ttm);
}
+int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ uint32_t flags)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL)
+ return -EINVAL;
+
+ gtt->userptr = addr;
+ gtt->usermm = current->mm;
+ gtt->userflags = flags;
+ return 0;
+}
+
+bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL)
+ return false;
+
+ return !!gtt->userptr;
+}
+
+bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL)
+ return false;
+
+ return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+}
+
static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
@@ -689,11 +822,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
- .sync_obj_signaled = &radeon_sync_obj_signaled,
- .sync_obj_wait = &radeon_sync_obj_wait,
- .sync_obj_flush = &radeon_sync_obj_flush,
- .sync_obj_unref = &radeon_sync_obj_unref,
- .sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6bf55ec85b62..ba4f38916026 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -40,12 +40,18 @@
#define UVD_IDLE_TIMEOUT_MS 1000
/* Firmware Names */
+#define FIRMWARE_R600 "radeon/R600_uvd.bin"
+#define FIRMWARE_RS780 "radeon/RS780_uvd.bin"
+#define FIRMWARE_RV770 "radeon/RV770_uvd.bin"
#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
#define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin"
+MODULE_FIRMWARE(FIRMWARE_R600);
+MODULE_FIRMWARE(FIRMWARE_RS780);
+MODULE_FIRMWARE(FIRMWARE_RV770);
MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
@@ -63,6 +69,23 @@ int radeon_uvd_init(struct radeon_device *rdev)
INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
switch (rdev->family) {
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ case CHIP_RV620:
+ case CHIP_RV635:
+ fw_name = FIRMWARE_R600;
+ break;
+
+ case CHIP_RS780:
+ case CHIP_RS880:
+ fw_name = FIRMWARE_RS780;
+ break;
+
+ case CHIP_RV770:
+ fw_name = FIRMWARE_RV770;
+ break;
+
case CHIP_RV710:
case CHIP_RV730:
case CHIP_RV740:
@@ -115,7 +138,8 @@ int radeon_uvd_init(struct radeon_device *rdev)
}
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
- RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+ RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
+ RADEON_GPU_PAGE_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
if (r) {
@@ -231,10 +255,30 @@ int radeon_uvd_resume(struct radeon_device *rdev)
return 0;
}
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+ uint32_t allowed_domains)
{
- rbo->placement.fpfn = 0 >> PAGE_SHIFT;
- rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ int i;
+
+ for (i = 0; i < rbo->placement.num_placement; ++i) {
+ rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+ rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ }
+
+ /* If it must be in VRAM it must be in the first segment as well */
+ if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
+ return;
+
+ /* abort if we already have more than one placement */
+ if (rbo->placement.num_placement > 1)
+ return;
+
+ /* add another 256MB segment */
+ rbo->placements[1] = rbo->placements[0];
+ rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+ rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+ rbo->placement.num_placement++;
+ rbo->placement.num_busy_placement++;
}
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
@@ -356,6 +400,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
+ struct fence *f;
void *ptr;
int i, r;
@@ -365,8 +410,9 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- if (bo->tbo.sync_obj) {
- r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ f = reservation_object_get_excl(bo->tbo.resv);
+ if (f) {
+ r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
return r;
@@ -604,38 +650,16 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
}
static int radeon_uvd_send_msg(struct radeon_device *rdev,
- int ring, struct radeon_bo *bo,
+ int ring, uint64_t addr,
struct radeon_fence **fence)
{
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head head;
struct radeon_ib ib;
- uint64_t addr;
int i, r;
- memset(&tv, 0, sizeof(tv));
- tv.bo = &bo->tbo;
-
- INIT_LIST_HEAD(&head);
- list_add(&tv.head, &head);
-
- r = ttm_eu_reserve_buffers(&ticket, &head);
- if (r)
- return r;
-
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
- radeon_uvd_force_into_uvd_segment(bo);
-
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto err;
-
r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
if (r)
- goto err;
+ return r;
- addr = radeon_bo_gpu_offset(bo);
ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
ib.ptr[1] = addr;
ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
@@ -646,20 +670,12 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
ib.ptr[i] = PACKET2(0);
ib.length_dw = 16;
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r)
- goto err;
- ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (fence)
*fence = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
- radeon_bo_unref(&bo);
- return 0;
-
-err:
- ttm_eu_backoff_reservation(&ticket, &head);
return r;
}
@@ -669,27 +685,18 @@ err:
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence)
{
- struct radeon_bo *bo;
- uint32_t *msg;
- int r, i;
+ /* we use the last page of the vcpu bo for the UVD message */
+ uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+ RADEON_GPU_PAGE_SIZE;
- r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
- if (r)
- return r;
+ uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint64_t addr = rdev->uvd.gpu_addr + offs;
- r = radeon_bo_reserve(bo, false);
- if (r) {
- radeon_bo_unref(&bo);
- return r;
- }
+ int r, i;
- r = radeon_bo_kmap(bo, (void **)&msg);
- if (r) {
- radeon_bo_unreserve(bo);
- radeon_bo_unref(&bo);
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+ if (r)
return r;
- }
 /* stitch together a UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
@@ -706,36 +713,26 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
for (i = 11; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- radeon_bo_kunmap(bo);
- radeon_bo_unreserve(bo);
-
- return radeon_uvd_send_msg(rdev, ring, bo, fence);
+ r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ return r;
}
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence)
{
- struct radeon_bo *bo;
- uint32_t *msg;
- int r, i;
+ /* we use the last page of the vcpu bo for the UVD message */
+ uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+ RADEON_GPU_PAGE_SIZE;
- r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
- if (r)
- return r;
+ uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint64_t addr = rdev->uvd.gpu_addr + offs;
- r = radeon_bo_reserve(bo, false);
- if (r) {
- radeon_bo_unref(&bo);
- return r;
- }
+ int r, i;
- r = radeon_bo_kmap(bo, (void **)&msg);
- if (r) {
- radeon_bo_unreserve(bo);
- radeon_bo_unref(&bo);
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+ if (r)
return r;
- }
 /* stitch together a UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
@@ -745,10 +742,9 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
for (i = 4; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- radeon_bo_kunmap(bo);
- radeon_bo_unreserve(bo);
-
- return radeon_uvd_send_msg(rdev, ring, bo, fence);
+ r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ return r;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f9b70a43aa52..c7190aadbd89 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
for (i = ib.length_dw; i < ib_size_dw; ++i)
ib.ptr[i] = 0x0;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
for (i = ib.length_dw; i < ib_size_dw; ++i)
ib.ptr[i] = 0x0;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
radeon_ring_write(ring, VCE_CMD_END);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
if (vce_v1_0_get_rptr(rdev, ring) != rptr)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ccae4d9dc3de..671ee566aa51 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -399,7 +399,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
INIT_LIST_HEAD(&head);
list_add(&tv.head, &head);
- r = ttm_eu_reserve_buffers(&ticket, &head);
+ r = ttm_eu_reserve_buffers(&ticket, &head, true);
if (r)
return r;
@@ -420,11 +420,11 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > 64);
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
goto error;
- ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+ ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
radeon_ib_free(rdev, &ib);
return 0;
@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
/* add a clone of the bo_va to clear the old address */
struct radeon_bo_va *tmp;
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last;
tmp->vm = vm;
@@ -689,11 +693,17 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
+ struct fence *fence;
+
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
+
+ fence = reservation_object_get_excl(pd->tbo.resv);
+ radeon_semaphore_sync_to(ib.semaphore,
+ (struct radeon_fence *)fence);
+
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
@@ -816,8 +826,11 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
+ struct fence *fence;
- radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+ fence = reservation_object_get_excl(pt->tbo.resv);
+ radeon_semaphore_sync_to(ib->semaphore,
+ (struct radeon_fence *)fence);
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -888,6 +901,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
+ if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+ bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
+
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
@@ -957,7 +973,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
WARN_ON(ib.length_dw > ndw);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 3e21e869015f..8a477bf1fdb3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index bbf2e076ee45..74426ac2bb5c 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 011779bd2b3d..a1274a31405c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
- int i, j, k;
+ int i, j;
switch (rdev->family) {
case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh);
+ rdev->config.si.active_cus = 0;
for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
- for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
- rdev->config.si.active_cus +=
- hweight32(si_get_cu_active_bitmap(rdev, i, j));
- }
+ rdev->config.si.active_cus +=
+ hweight32(si_get_cu_active_bitmap(rdev, i, j));
}
}
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xe000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
si_cp_enable(rdev, true);
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
return 0;
@@ -5028,7 +5027,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0);
@@ -5036,7 +5035,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 716505129450..7c22baaf94db 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 32e50be9c4ac..57f780053b3e 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- /* There are stability issues reported on with
- * bapm enabled when switching between AC and battery
- * power. At the same time, some MSI boards hang
- * if it's not enabled and dpm is enabled. Just enable
- * it for MSI boards right now.
- */
- if (rdev->pdev->subsystem_vendor == 0x1462)
- pi->enable_bapm = true;
- else
+ if (radeon_bapm == -1) {
+ /* There are stability issues reported with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if it's not enabled and dpm is enabled. Just enable
+ * it for MSI boards right now.
+ */
+ if (rdev->pdev->subsystem_vendor == 0x1462)
+ pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
+ } else if (radeon_bapm == 0) {
pi->enable_bapm = false;
+ } else {
+ pi->enable_bapm = true;
+ }
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index be42c8125203..e72b3cb59358 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -22,6 +22,7 @@
* Authors: Christian König <christian.koenig@amd.com>
*/
+#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
@@ -70,6 +71,82 @@ void uvd_v1_0_set_wptr(struct radeon_device *rdev,
}
/**
+ * uvd_v1_0_fence_emit - emit a fence & trap command
+ *
+ * @rdev: radeon_device pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void uvd_v1_0_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 0);
+
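+ /* followed by the trap command */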
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 2);
+ return;
+}
+
+/**
+ * uvd_v1_0_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v1_0_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t size;
+ int r;
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = (rdev->uvd.gpu_addr >> 3) + 16;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ WREG32(UVD_FW_START, *((uint32_t *)rdev->uvd.cpu_addr));
+
+ return 0;
+}
+
+/**
* uvd_v1_0_init - start and test UVD block
*
* @rdev: radeon_device pointer
@@ -124,14 +201,38 @@ int uvd_v1_0_init(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
radeon_ring_write(ring, 3);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
done:
/* lower clocks again */
radeon_set_uvd_clocks(rdev, 0, 0);
- if (!r)
+ if (!r) {
+ switch (rdev->family) {
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV620:
+ /* 64-byte granularity workaround */
+ WREG32(MC_CONFIG, 0);
+ WREG32(MC_CONFIG, 1 << 4);
+ WREG32(RS_DQ_RD_RET_CONF, 0x3f);
+ WREG32(MC_CONFIG, 0x1f);
+
+ /* fall through */
+ case CHIP_RV670:
+ case CHIP_RV635:
+
+ /* write clean workaround */
+ WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10);
+ break;
+
+ default:
+ /* TODO: Do we need more? */
+ break;
+ }
+
DRM_INFO("UVD initialized successfully.\n");
+ }
return r;
}
@@ -218,12 +319,12 @@ int uvd_v1_0_start(struct radeon_device *rdev)
/* enable UMC */
WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
/* boot up the VCPU */
WREG32(UVD_SOFT_RESET, 0);
mdelay(10);
- WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
-
for (i = 0; i < 10; ++i) {
uint32_t status;
for (j = 0; j < 100; ++j) {
@@ -331,7 +432,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(UVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 8bfdadd56598..89193519f8a1 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -72,6 +72,10 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
uint32_t chip_id, size;
int r;
+ /* RV770 uses V1.0 MC */
+ if (rdev->family == CHIP_RV770)
+ return uvd_v1_0_resume(rdev);
+
r = radeon_uvd_resume(rdev);
if (r)
return r;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fda64b7b73e8..672d2fcba009 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -158,6 +158,7 @@ static struct drm_driver rcar_du_driver = {
.unload = rcar_du_unload,
.preclose = rcar_du_preclose,
.lastclose = rcar_du_lastclose,
+ .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index c97cdc9ab239..9288d3037ce5 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -556,7 +556,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
/*
* Initialize mappings. On Savage4 and SavageIX the alignment
* and size of the aperture is not suitable for automatic MTRR setup
- * in drm_addmap. Therefore we add them manually before the maps are
+ * in drm_legacy_addmap. Therefore we add them manually before the maps are
* initialized, and tear them down on last close.
*/
int savage_driver_firstopen(struct drm_device *dev)
@@ -624,19 +624,20 @@ int savage_driver_firstopen(struct drm_device *dev)
/* Automatic MTRR setup will do the right thing. */
}
- ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
- _DRM_READ_ONLY, &dev_priv->mmio);
+ ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
+ _DRM_REGISTERS, _DRM_READ_ONLY,
+ &dev_priv->mmio);
if (ret)
return ret;
- ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
- _DRM_WRITE_COMBINING, &dev_priv->fb);
+ ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
+ _DRM_WRITE_COMBINING, &dev_priv->fb);
if (ret)
return ret;
- ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
- _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
- &dev_priv->aperture);
+ ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
+ _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
+ &dev_priv->aperture);
return ret;
}
@@ -698,7 +699,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
dev_priv->texture_offset = init->texture_offset;
dev_priv->texture_size = init->texture_size;
- dev_priv->sarea = drm_getsarea(dev);
+ dev_priv->sarea = drm_legacy_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
savage_do_cleanup_bci(dev);
@@ -1050,7 +1051,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
return;
if (file_priv->master && file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
+ drm_legacy_idlelock_take(&file_priv->master->lock);
release_idlelock = 1;
}
@@ -1069,7 +1070,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
}
if (release_idlelock)
- drm_idlelock_release(&file_priv->master->lock);
+ drm_legacy_idlelock_release(&file_priv->master->lock);
}
const struct drm_ioctl_desc savage_ioctls[] = {
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 3c030216e888..1b09d2182037 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -57,6 +57,7 @@ static struct drm_driver driver = {
.preclose = savage_reclaim_buffers,
.lastclose = savage_driver_lastclose,
.unload = savage_driver_unload,
+ .set_busid = drm_pci_set_busid,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = &savage_driver_fops,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index ff4ba483b602..873d12f851bf 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -267,6 +267,7 @@ static struct drm_driver shmob_drm_driver = {
.load = shmob_drm_load,
.unload = shmob_drm_unload,
.preclose = shmob_drm_preclose,
+ .set_busid = drm_platform_set_busid,
.irq_handler = shmob_drm_irq,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = shmob_drm_enable_vblank,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 756f787b7143..54858e6fedaf 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -108,6 +108,7 @@ static struct drm_driver driver = {
.open = sis_driver_open,
.preclose = sis_reclaim_buffers_locked,
.postclose = sis_driver_postclose,
+ .set_busid = drm_pci_set_busid,
.dma_quiescent = sis_idle,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 77f288e4a0a6..93ad8a5704d1 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -319,12 +319,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
if (!(file->minor->master && file->master->lock.hw_lock))
return;
- drm_idlelock_take(&file->master->lock);
+ drm_legacy_idlelock_take(&file->master->lock);
mutex_lock(&dev->struct_mutex);
if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
- drm_idlelock_release(&file->master->lock);
+ drm_legacy_idlelock_release(&file->master->lock);
return;
}
@@ -345,7 +345,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
}
mutex_unlock(&dev->struct_mutex);
- drm_idlelock_release(&file->master->lock);
+ drm_legacy_idlelock_release(&file->master->lock);
return;
}
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 3492ca5c46d3..df533ff999a4 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -55,6 +55,7 @@ static const struct file_operations tdfx_driver_fops = {
};
static struct drm_driver driver = {
+ .set_busid = drm_pci_set_busid,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ef40381f3909..6553fd238685 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -18,6 +18,8 @@
struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
+ bool supports_block_linear;
+ unsigned int pitch_align;
};
struct tegra_plane {
@@ -212,15 +214,44 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
- if (window->tiled) {
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = window->tiling.value;
+
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
} else {
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- }
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ break;
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ }
value = WIN_ENABLE;
@@ -288,6 +319,7 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_dc_window window;
unsigned int i;
+ int err;
memset(&window, 0, sizeof(window));
window.src.x = src_x >> 16;
@@ -301,7 +333,10 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
window.format = tegra_dc_format(fb->pixel_format, &window.swap);
window.bits_per_pixel = fb->bits_per_pixel;
window.bottom_up = tegra_fb_is_bottom_up(fb);
- window.tiled = tegra_fb_is_tiled(fb);
+
+ err = tegra_fb_get_tiling(fb, &window.tiling);
+ if (err < 0)
+ return err;
for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -402,8 +437,14 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
{
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
+ struct tegra_bo_tiling tiling;
unsigned int format, swap;
unsigned long value;
+ int err;
+
+ err = tegra_fb_get_tiling(fb, &tiling);
+ if (err < 0)
+ return err;
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -417,15 +458,44 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP);
- if (tegra_fb_is_tiled(fb)) {
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = tiling.value;
+
+ switch (tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
} else {
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- }
+ switch (tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ }
/* make sure bottom-up buffers are properly displayed */
if (tegra_fb_is_bottom_up(fb)) {
@@ -1214,12 +1284,20 @@ static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct tegra_drm *tegra = drm->dev_private;
int err;
drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+ /*
+ * Keep track of the minimum pitch alignment across all display
+ * controllers.
+ */
+ if (dc->soc->pitch_align > tegra->pitch_align)
+ tegra->pitch_align = dc->soc->pitch_align;
+
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
@@ -1277,16 +1355,29 @@ static const struct host1x_client_ops dc_client_ops = {
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 8,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 8,
+};
+
+static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
+ .supports_interlacing = false,
+ .supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 64,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
+ .supports_block_linear = true,
+ .pitch_align = 64,
};
static const struct of_device_id tegra_dc_of_match[] = {
@@ -1303,6 +1394,7 @@ static const struct of_device_id tegra_dc_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, tegra_dc_of_match);
static int tegra_dc_parse_dt(struct tegra_dc *dc)
{
@@ -1430,6 +1522,7 @@ static int tegra_dc_remove(struct platform_device *pdev)
return err;
}
+ reset_control_assert(dc->rst);
clk_disable_unprepare(dc->clk);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 78c5feff95d2..705c93b00794 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -428,6 +428,11 @@
#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
#define DC_WINBUF_UFLOW_STATUS 0x80a
+#define DC_WINBUF_SURFACE_KIND 0x80b
+#define DC_WINBUF_SURFACE_KIND_PITCH (0 << 0)
+#define DC_WINBUF_SURFACE_KIND_TILED (1 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4)
#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 3f132e356e9c..708f783ead47 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -382,6 +382,7 @@ static const struct of_device_id tegra_dpaux_of_match[] = {
{ .compatible = "nvidia,tegra124-dpaux", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
struct platform_driver tegra_dpaux_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index fd736efd14bd..59736bb810cd 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -132,6 +132,45 @@ host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
return &bo->base;
}
+static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
+ struct drm_tegra_reloc __user *src,
+ struct drm_device *drm,
+ struct drm_file *file)
+{
+ u32 cmdbuf, target;
+ int err;
+
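+ /* fetch the handles into locals so they can be resolved to host1x BOs below */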
+ err = get_user(cmdbuf, &src->cmdbuf.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(target, &src->target.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->target.offset, &src->target.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->shift, &src->shift);
+ if (err < 0)
+ return err;
+
+ dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
+ if (!dest->cmdbuf.bo)
+ return -ENOENT;
+
+ dest->target.bo = host1x_bo_lookup(drm, file, target);
+ if (!dest->target.bo)
+ return -ENOENT;
+
+ return 0;
+}
+
int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file)
@@ -184,26 +223,13 @@ int tegra_drm_submit(struct tegra_drm_context *context,
cmdbufs++;
}
- if (copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs)) {
- err = -EFAULT;
- goto fail;
- }
-
+ /* copy and resolve relocations from submit */
while (num_relocs--) {
- struct host1x_reloc *reloc = &job->relocarray[num_relocs];
- struct host1x_bo *cmdbuf, *target;
-
- cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
- target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
- reloc->cmdbuf = cmdbuf;
- reloc->target = target;
-
- if (!reloc->target || !reloc->cmdbuf) {
- err = -ENOENT;
+ err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
+ &relocs[num_relocs], drm,
+ file);
+ if (err < 0)
goto fail;
- }
}
if (copy_from_user(job->waitchk, waitchks,
@@ -455,11 +481,151 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
return 0;
}
+
+static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_tiling *args = data;
+ enum tegra_bo_tiling_mode mode;
+ struct drm_gem_object *gem;
+ unsigned long value = 0;
+ struct tegra_bo *bo;
+
+ switch (args->mode) {
+ case DRM_TEGRA_GEM_TILING_MODE_PITCH:
+ mode = TEGRA_BO_TILING_MODE_PITCH;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_TILED:
+ mode = TEGRA_BO_TILING_MODE_TILED;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
+ mode = TEGRA_BO_TILING_MODE_BLOCK;
+
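+ /* block height is a 3-bit field in hardware, but only values 0-5 are accepted */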
+ if (args->value > 5)
+ return -EINVAL;
+
+ value = args->value;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ bo->tiling.mode = mode;
+ bo->tiling.value = value;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_tiling *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+ int err = 0;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ switch (bo->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+ args->value = bo->tiling.value;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ drm_gem_object_unreference(gem);
+
+ return err;
+}
+
+static int tegra_gem_set_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ bo->flags = 0;
+
+ if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
+ bo->flags |= TEGRA_BO_BOTTOM_UP;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ args->flags = 0;
+
+ if (bo->flags & TEGRA_BO_BOTTOM_UP)
+ args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
#endif
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
@@ -469,6 +635,10 @@ static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED),
#endif
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 0d30689dff01..e89c70fa82d5 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -19,6 +19,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
+#include "gem.h"
+
struct reset_control;
struct tegra_fb {
@@ -43,6 +45,8 @@ struct tegra_drm {
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_fbdev *fbdev;
#endif
+
+ unsigned int pitch_align;
};
struct tegra_drm_client;
@@ -160,7 +164,8 @@ struct tegra_dc_window {
unsigned int stride[2];
unsigned long base[3];
bool bottom_up;
- bool tiled;
+
+ struct tegra_bo_tiling tiling;
};
/* from dc.c */
@@ -279,7 +284,8 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index);
bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
-bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling);
int tegra_drm_fb_prepare(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index bd56f2affa78..f7874458926a 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -474,7 +474,8 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
value = tegra_dsi_readl(dsi, DSI_CONTROL);
- value |= DSI_CONTROL_HS_CLK_CTRL;
+ if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ value |= DSI_CONTROL_HS_CLK_CTRL;
value &= ~DSI_CONTROL_TX_TRIG(3);
value &= ~DSI_CONTROL_DCS_ENABLE;
value |= DSI_CONTROL_VIDEO_ENABLE;
@@ -982,6 +983,7 @@ static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra114-dsi", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_dsi_of_match);
struct platform_driver tegra_dsi_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index fc1528e0bda1..3513d12d5aa1 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -46,14 +46,15 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
return false;
}
-bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
- if (fb->planes[0]->flags & TEGRA_BO_TILED)
- return true;
+ /* TODO: handle YUV formats? */
+ *tiling = fb->planes[0]->tiling;
- return false;
+ return 0;
}
static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
@@ -193,6 +194,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
+ struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
struct drm_mode_fb_cmd2 cmd = { 0 };
unsigned int bytes_per_pixel;
@@ -207,7 +209,8 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
cmd.width = sizes->surface_width;
cmd.height = sizes->surface_height;
- cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
+ tegra->pitch_align);
cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index aa85b7b26f10..ce023fa3e8ae 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -16,6 +16,7 @@
#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>
+#include "drm.h"
#include "gem.h"
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
@@ -126,7 +127,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
goto err_mmap;
if (flags & DRM_TEGRA_GEM_CREATE_TILED)
- bo->flags |= TEGRA_BO_TILED;
+ bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
@@ -259,8 +260,10 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
+ min_pitch = round_up(min_pitch, tegra->pitch_align);
if (args->pitch < min_pitch)
args->pitch = min_pitch;
@@ -420,7 +423,7 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
int flags)
{
return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
- flags);
+ flags, NULL);
}
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 2f3fe96c5154..43a25c853357 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -16,8 +16,18 @@
#include <drm/drm.h>
#include <drm/drmP.h>
-#define TEGRA_BO_TILED (1 << 0)
-#define TEGRA_BO_BOTTOM_UP (1 << 1)
+#define TEGRA_BO_BOTTOM_UP (1 << 0)
+
+enum tegra_bo_tiling_mode {
+ TEGRA_BO_TILING_MODE_PITCH,
+ TEGRA_BO_TILING_MODE_TILED,
+ TEGRA_BO_TILING_MODE_BLOCK,
+};
+
+struct tegra_bo_tiling {
+ enum tegra_bo_tiling_mode mode;
+ unsigned long value;
+};
struct tegra_bo {
struct drm_gem_object gem;
@@ -26,6 +36,8 @@ struct tegra_bo {
struct sg_table *sgt;
dma_addr_t paddr;
void *vaddr;
+
+ struct tegra_bo_tiling tiling;
};
static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 7c53941f2a9e..02cd3e37a6ec 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -121,6 +121,7 @@ static const struct of_device_id gr2d_match[] = {
{ .compatible = "nvidia,tegra20-gr2d" },
{ },
};
+MODULE_DEVICE_TABLE(of, gr2d_match);
static const u32 gr2d_addr_regs[] = {
GR2D_UA_BASE_ADDR,
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 30f5ba9bd6d0..0b3f2b977ba0 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -12,7 +12,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <linux/tegra-powergate.h>
+
+#include <soc/tegra/pmc.h>
#include "drm.h"
#include "gem.h"
@@ -130,6 +131,7 @@ static const struct of_device_id tegra_gr3d_match[] = {
{ .compatible = "nvidia,tegra20-gr3d" },
{ }
};
+MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
static const u32 gr3d_addr_regs[] = {
GR3D_IDX_ATTRIBUTE( 0),
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ba067bb767e3..ffe26547328d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1450,6 +1450,7 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
{ .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
static int tegra_hdmi_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 446837e955b6..0c67d7eebc94 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -140,7 +140,9 @@ static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
drm_panel_disable(panel);
tegra_output_disable(output);
+ drm_panel_unprepare(panel);
} else {
+ drm_panel_prepare(panel);
tegra_output_enable(output);
drm_panel_enable(panel);
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 27c979b50111..7829e81f065d 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -11,7 +11,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <linux/tegra-powergate.h>
+
+#include <soc/tegra/pmc.h>
#include <drm/drm_dp_helper.h>
@@ -516,7 +517,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
if (err < 0) {
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- return err;
+ goto unlock;
}
}
@@ -525,7 +526,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
memset(&config, 0, sizeof(config));
- config.bits_per_pixel = 24; /* XXX: don't hardcode? */
+ config.bits_per_pixel = output->connector.display_info.bpc * 3;
err = tegra_sor_calc_config(sor, mode, &config, &link);
if (err < 0)
@@ -815,12 +816,22 @@ static int tegra_output_sor_enable(struct tegra_output *output)
* configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
* raster, associate with display controller)
*/
- value = SOR_STATE_ASY_VSYNCPOL |
- SOR_STATE_ASY_HSYNCPOL |
- SOR_STATE_ASY_PROTOCOL_DP_A |
+ value = SOR_STATE_ASY_PROTOCOL_DP_A |
SOR_STATE_ASY_CRC_MODE_COMPLETE |
SOR_STATE_ASY_OWNER(dc->pipe + 1);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
switch (config.bits_per_pixel) {
case 24:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
@@ -1455,6 +1466,7 @@ static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra124-sor", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
struct platform_driver tegra_sor_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 6be623b4a86f..aea4b7663934 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -502,6 +502,7 @@ static struct drm_driver tilcdc_driver = {
.unload = tilcdc_unload,
.preclose = tilcdc_preclose,
.lastclose = tilcdc_lastclose,
+ .set_busid = drm_platform_set_busid,
.irq_handler = tilcdc_irq,
.irq_preinstall = tilcdc_irq_preinstall,
.irq_postinstall = tilcdc_irq_postinstall,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3da89d5dab60..a11969acfea5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -40,6 +40,7 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
+#include <linux/reservation.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
@@ -53,12 +54,13 @@ static struct attribute ttm_bo_count = {
.mode = S_IRUGO
};
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+static inline int ttm_mem_type_from_place(const struct ttm_place *place,
+ uint32_t *mem_type)
{
int i;
for (i = 0; i <= TTM_PL_PRIV5; i++)
- if (flags & (1 << i)) {
+ if (place->flags & (1 << i)) {
*mem_type = i;
return 0;
}
@@ -89,12 +91,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
- ret = ttm_mem_type_from_flags(placement->placement[i],
+ ret = ttm_mem_type_from_place(&placement->placement[i],
&mem_type);
if (ret)
return;
pr_err(" placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i], mem_type);
+ i, placement->placement[i].flags, mem_type);
ttm_mem_type_debug(bo->bdev, mem_type);
}
}
@@ -141,7 +143,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
BUG_ON(atomic_read(&bo->cpu_writers));
- BUG_ON(bo->sync_obj != NULL);
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
@@ -402,36 +403,48 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ww_mutex_unlock (&bo->resv->lock);
}
+static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
+{
+ struct reservation_object_list *fobj;
+ struct fence *fence;
+ int i;
+
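+ /* make sure every fence signals eventually, even without an explicit waiter */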
+ fobj = reservation_object_get_list(bo->resv);
+ fence = reservation_object_get_excl(bo->resv);
+ if (fence && !fence->ops->signaled)
+ fence_enable_sw_signaling(fence);
+
+ for (i = 0; fobj && i < fobj->shared_count; ++i) {
+ fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(bo->resv));
+
+ if (!fence->ops->signaled)
+ fence_enable_sw_signaling(fence);
+ }
+}
+
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver = bdev->driver;
- void *sync_obj = NULL;
int put_count;
int ret;
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL);
- spin_lock(&bdev->fence_lock);
- (void) ttm_bo_wait(bo, false, false, true);
- if (!ret && !bo->sync_obj) {
- spin_unlock(&bdev->fence_lock);
- put_count = ttm_bo_del_from_lru(bo);
-
- spin_unlock(&glob->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
+ if (!ret) {
+ if (!ttm_bo_wait(bo, false, false, true)) {
+ put_count = ttm_bo_del_from_lru(bo);
- ttm_bo_list_ref_sub(bo, put_count, true);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
- return;
- }
- if (bo->sync_obj)
- sync_obj = driver->sync_obj_ref(bo->sync_obj);
- spin_unlock(&bdev->fence_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
- if (!ret) {
+ return;
+ } else
+ ttm_bo_flush_all_fences(bo);
/*
* Make NO_EVICT bos immediately available to
@@ -450,10 +463,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- if (sync_obj) {
- driver->sync_obj_flush(sync_obj);
- driver->sync_obj_unref(&sync_obj);
- }
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
@@ -474,44 +483,26 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait_gpu)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
int ret;
- spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
if (ret && !no_wait_gpu) {
- void *sync_obj;
-
- /*
- * Take a reference to the fence and unreserve,
- * at this point the buffer should be dead, so
- * no new sync objects can be attached.
- */
- sync_obj = driver->sync_obj_ref(bo->sync_obj);
- spin_unlock(&bdev->fence_lock);
-
- __ttm_bo_unreserve(bo);
+ long lret;
+ ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
- ret = driver->sync_obj_wait(sync_obj, false, interruptible);
- driver->sync_obj_unref(&sync_obj);
- if (ret)
- return ret;
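+ /* wait for all shared and exclusive fences, with a 30 second timeout */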
+ lret = reservation_object_wait_timeout_rcu(bo->resv,
+ true,
+ interruptible,
+ 30 * HZ);
- /*
- * remove sync_obj with ttm_bo_wait, the wait should be
- * finished, and no new wait object should have been added.
- */
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, true);
- WARN_ON(ret);
- spin_unlock(&bdev->fence_lock);
- if (ret)
- return ret;
+ if (lret < 0)
+ return lret;
+ else if (lret == 0)
+ return -EBUSY;
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL);
@@ -528,8 +519,14 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&glob->lru_lock);
return 0;
}
- } else
- spin_unlock(&bdev->fence_lock);
+
+ /*
+ * remove sync_obj with ttm_bo_wait; the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ ret = ttm_bo_wait(bo, false, false, true);
+ WARN_ON(ret);
+ }
if (ret || unlikely(list_empty(&bo->ddestroy))) {
__ttm_bo_unreserve(bo);
@@ -667,9 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
@@ -685,8 +680,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem.bus.io_reserved_vm = false;
evict_mem.bus.io_reserved_count = 0;
- placement.fpfn = 0;
- placement.lpfn = 0;
placement.num_placement = 0;
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
@@ -774,7 +767,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
- struct ttm_placement *placement,
+ const struct ttm_place *place,
struct ttm_mem_reg *mem,
bool interruptible,
bool no_wait_gpu)
@@ -784,7 +777,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
int ret;
do {
- ret = (*man->func->get_node)(man, bo, placement, 0, mem);
+ ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret != 0))
return ret;
if (mem->mm_node)
@@ -827,18 +820,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
uint32_t mem_type,
- uint32_t proposed_placement,
+ const struct ttm_place *place,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
- if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+ if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
return false;
- if ((proposed_placement & man->available_caching) == 0)
+ if ((place->flags & man->available_caching) == 0)
return false;
- cur_flags |= (proposed_placement & man->available_caching);
+ cur_flags |= (place->flags & man->available_caching);
*masked_placement = cur_flags;
return true;
@@ -869,15 +862,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
- ret = ttm_mem_type_from_flags(placement->placement[i],
- &mem_type);
+ const struct ttm_place *place = &placement->placement[i];
+
+ ret = ttm_mem_type_from_place(place, &mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
- type_ok = ttm_bo_mt_compatible(man,
- mem_type,
- placement->placement[i],
+ type_ok = ttm_bo_mt_compatible(man, mem_type, place,
&cur_flags);
if (!type_ok)
@@ -889,7 +881,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
- ttm_flag_masked(&cur_flags, placement->placement[i],
+ ttm_flag_masked(&cur_flags, place->flags,
~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
@@ -897,8 +889,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (man->has_type && man->use_type) {
type_found = true;
- ret = (*man->func->get_node)(man, bo, placement,
- cur_flags, mem);
+ ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;
}
@@ -916,17 +907,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return -EINVAL;
for (i = 0; i < placement->num_busy_placement; ++i) {
- ret = ttm_mem_type_from_flags(placement->busy_placement[i],
- &mem_type);
+ const struct ttm_place *place = &placement->busy_placement[i];
+
+ ret = ttm_mem_type_from_place(place, &mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type)
continue;
- if (!ttm_bo_mt_compatible(man,
- mem_type,
- placement->busy_placement[i],
- &cur_flags))
+ if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
@@ -935,7 +924,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
- ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ttm_flag_masked(&cur_flags, place->flags,
~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM) {
@@ -945,7 +934,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
@@ -966,7 +955,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
{
int ret = 0;
struct ttm_mem_reg mem;
- struct ttm_bo_device *bdev = bo->bdev;
lockdep_assert_held(&bo->resv->lock.base);
@@ -975,9 +963,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
- spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
@@ -1006,20 +992,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
{
int i;
- if (mem->mm_node && placement->lpfn != 0 &&
- (mem->start < placement->fpfn ||
- mem->start + mem->num_pages > placement->lpfn))
- return false;
-
for (i = 0; i < placement->num_placement; i++) {
- *new_flags = placement->placement[i];
+ const struct ttm_place *heap = &placement->placement[i];
+ if (mem->mm_node && heap->lpfn != 0 &&
+ (mem->start < heap->fpfn ||
+ mem->start + mem->num_pages > heap->lpfn))
+ continue;
+
+ *new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
}
for (i = 0; i < placement->num_busy_placement; i++) {
- *new_flags = placement->busy_placement[i];
+ const struct ttm_place *heap = &placement->busy_placement[i];
+ if (mem->mm_node && heap->lpfn != 0 &&
+ (mem->start < heap->fpfn ||
+ mem->start + mem->num_pages > heap->lpfn))
+ continue;
+
+ *new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
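Since the fpfn/lpfn window now lives on each entry, ttm_bo_mem_compat skips
an individual heap whose range cannot hold the existing allocation instead
of rejecting the whole placement up front. The per-heap test, as a sketch
(sketch_* is hypothetical):

/* Sketch: can an existing allocation stay inside this heap's window? */
static bool sketch_heap_holds(const struct ttm_place *heap,
			      const struct ttm_mem_reg *mem)
{
	if (mem->mm_node && heap->lpfn != 0 &&
	    (mem->start < heap->fpfn ||
	     mem->start + mem->num_pages > heap->lpfn))
		return false;	/* outside [fpfn, lpfn), try the next heap */
	return true;
}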
@@ -1037,11 +1030,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
uint32_t new_flags;
lockdep_assert_held(&bo->resv->lock.base);
- /* Check that range is valid */
- if (placement->lpfn || placement->fpfn)
- if (placement->fpfn > placement->lpfn ||
- (placement->lpfn - placement->fpfn) < bo->num_pages)
- return -EINVAL;
/*
* Check whether we need to move buffer.
*/
@@ -1070,15 +1058,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_validate);
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- BUG_ON((placement->fpfn || placement->lpfn) &&
- (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
- return 0;
-}
-
int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
unsigned long size,
@@ -1147,15 +1126,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
- ret = ttm_bo_check_placement(bo, placement);
-
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (likely(!ret) &&
- (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg))
+ if (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg)
ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
bo->mem.num_pages);
@@ -1477,7 +1453,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
- spin_lock_init(&bdev->fence_lock);
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
@@ -1530,65 +1505,56 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
- struct ttm_bo_driver *driver = bo->bdev->driver;
- struct ttm_bo_device *bdev = bo->bdev;
- void *sync_obj;
- int ret = 0;
-
- if (likely(bo->sync_obj == NULL))
- return 0;
+ struct reservation_object_list *fobj;
+ struct reservation_object *resv;
+ struct fence *excl;
+ long timeout = 15 * HZ;
+ int i;
- while (bo->sync_obj) {
+ resv = bo->resv;
+ fobj = reservation_object_get_list(resv);
+ excl = reservation_object_get_excl(resv);
+ if (excl) {
+ if (!fence_is_signaled(excl)) {
+ if (no_wait)
+ return -EBUSY;
- if (driver->sync_obj_signaled(bo->sync_obj)) {
- void *tmp_obj = bo->sync_obj;
- bo->sync_obj = NULL;
- clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bdev->fence_lock);
- driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bdev->fence_lock);
- continue;
+ timeout = fence_wait_timeout(excl,
+ interruptible, timeout);
}
+ }
- if (no_wait)
- return -EBUSY;
+ for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
+ struct fence *fence;
+ fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(resv));
- sync_obj = driver->sync_obj_ref(bo->sync_obj);
- spin_unlock(&bdev->fence_lock);
- ret = driver->sync_obj_wait(sync_obj,
- lazy, interruptible);
- if (unlikely(ret != 0)) {
- driver->sync_obj_unref(&sync_obj);
- spin_lock(&bdev->fence_lock);
- return ret;
- }
- spin_lock(&bdev->fence_lock);
- if (likely(bo->sync_obj == sync_obj)) {
- void *tmp_obj = bo->sync_obj;
- bo->sync_obj = NULL;
- clear_bit(TTM_BO_PRIV_FLAG_MOVING,
- &bo->priv_flags);
- spin_unlock(&bdev->fence_lock);
- driver->sync_obj_unref(&sync_obj);
- driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bdev->fence_lock);
- } else {
- spin_unlock(&bdev->fence_lock);
- driver->sync_obj_unref(&sync_obj);
- spin_lock(&bdev->fence_lock);
+ if (!fence_is_signaled(fence)) {
+ if (no_wait)
+ return -EBUSY;
+
+ timeout = fence_wait_timeout(fence,
+ interruptible, timeout);
}
}
+
+ if (timeout < 0)
+ return timeout;
+
+ if (timeout == 0)
+ return -EBUSY;
+
+ reservation_object_add_excl_fence(resv, NULL);
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
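The rewritten ttm_bo_wait spends one shared 15-second jiffy budget across
the exclusive fence and then every shared fence in turn. The per-fence step
looks roughly like this (a sketch against the struct fence API of the time,
not the verbatim function; sketch_* is hypothetical):

/* Sketch: wait on one fence, consuming a shared timeout budget. */
static long sketch_wait_one(struct fence *f, bool interruptible,
			    bool no_wait, long budget)
{
	if (fence_is_signaled(f))
		return budget;		/* nothing to wait for */
	if (no_wait)
		return -EBUSY;		/* caller only wanted a poll */
	return fence_wait_timeout(f, interruptible, budget);
}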
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
- struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
/*
@@ -1598,9 +1564,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
if (unlikely(ret != 0))
return ret;
- spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, true, no_wait);
- spin_unlock(&bdev->fence_lock);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
@@ -1657,9 +1621,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* Wait for GPU, then move to system cached.
*/
- spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->bdev->fence_lock);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 9e103a4875c8..964387fc5c8f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -49,8 +49,7 @@ struct ttm_range_manager {
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- uint32_t flags,
+ const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -60,7 +59,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
unsigned long lpfn;
int ret;
- lpfn = placement->lpfn;
+ lpfn = place->lpfn;
if (!lpfn)
lpfn = man->size;
@@ -68,13 +67,13 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
- if (flags & TTM_PL_FLAG_TOPDOWN)
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
aflags = DRM_MM_CREATE_TOP;
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
mem->page_alignment, 0,
- placement->fpfn, lpfn,
+ place->fpfn, lpfn,
DRM_MM_SEARCH_BEST,
aflags);
spin_unlock(&rman->lock);
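On the manager side the new per-place data maps one-to-one onto drm_mm:
fpfn/lpfn bound the search range and TTM_PL_FLAG_TOPDOWN simply selects
DRM_MM_CREATE_TOP. A sketch of the call, assuming the drm_mm API of this
release (sketch_* is hypothetical):

/* Sketch: allocate num_pages inside [fpfn, lpfn), top-down on request. */
static int sketch_range_alloc(struct drm_mm *mm, struct drm_mm_node *node,
			      unsigned long num_pages, unsigned alignment,
			      const struct ttm_place *place,
			      unsigned long lpfn)
{
	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		aflags = DRM_MM_CREATE_TOP;

	return drm_mm_insert_node_in_range_generic(mm, node, num_pages,
						   alignment, 0,
						   place->fpfn, lpfn,
						   DRM_MM_SEARCH_BEST, aflags);
}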
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 30e5d90cb7bc..824af90cbe31 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
+#include <linux/reservation.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
@@ -444,8 +445,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
struct ttm_buffer_object *fbo;
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
int ret;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
@@ -466,12 +465,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
- spin_lock(&bdev->fence_lock);
- if (bo->sync_obj)
- fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
- else
- fbo->sync_obj = NULL;
- spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
@@ -644,30 +637,20 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- void *sync_obj,
+ struct fence *fence,
bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
- void *tmp_obj = NULL;
- spin_lock(&bdev->fence_lock);
- if (bo->sync_obj) {
- tmp_obj = bo->sync_obj;
- bo->sync_obj = NULL;
- }
- bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ reservation_object_add_excl_fence(bo->resv, fence);
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
- if (tmp_obj)
- driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
@@ -688,14 +671,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bdev->fence_lock);
- if (tmp_obj)
- driver->sync_obj_unref(&tmp_obj);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
+ reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
/**
* If we're not moving to fixed memory, the TTM object
* needs to stay alive. Otherwise hang it on the ghost
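Net effect of the accel-cleanup hunk: the copy fence is published twice,
once on the buffer itself and once on the ghost object that keeps the old
backing store alive until the copy finishes. Condensed into a sketch (it
mirrors the static helpers above and is not the verbatim code):

/* Sketch: attach the move fence to both objects involved in the move. */
static int sketch_accel_cleanup(struct ttm_buffer_object *bo,
				struct fence *fence)
{
	struct ttm_buffer_object *ghost;
	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	/* the ghost holds the old backing store until the fence signals */
	reservation_object_add_excl_fence(ghost->resv, fence);
	return 0;
}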
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ce48e5a9cb4..d05437f219e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -45,10 +45,8 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
- spin_lock(&bdev->fence_lock);
if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
goto out_unlock;
@@ -82,7 +80,6 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
VM_FAULT_NOPAGE;
out_unlock:
- spin_unlock(&bdev->fence_lock);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e8dac8758528..adafc0f8ec06 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,20 +32,12 @@
#include <linux/sched.h>
#include <linux/module.h>
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+ struct ttm_validate_buffer *entry)
{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
+ list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (!entry->reserved)
- continue;
- entry->reserved = false;
- if (entry->removed) {
- ttm_bo_add_to_lru(bo);
- entry->removed = false;
- }
__ttm_bo_unreserve(bo);
}
}
@@ -56,27 +48,9 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (!entry->reserved)
- continue;
-
- if (!entry->removed) {
- entry->put_count = ttm_bo_del_from_lru(bo);
- entry->removed = true;
- }
- }
-}
-
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
+ unsigned put_count = ttm_bo_del_from_lru(bo);
- if (entry->put_count) {
- ttm_bo_list_ref_sub(bo, entry->put_count, true);
- entry->put_count = 0;
- }
+ ttm_bo_list_ref_sub(bo, put_count, true);
}
}
@@ -91,11 +65,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
+
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list);
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ ttm_bo_add_to_lru(bo);
+ __ttm_bo_unreserve(bo);
+ }
+ spin_unlock(&glob->lru_lock);
+
if (ticket)
ww_acquire_fini(ticket);
- spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -112,7 +93,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
*/
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list)
+ struct list_head *list, bool intr)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
@@ -121,60 +102,55 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (list_empty(list))
return 0;
- list_for_each_entry(entry, list, head) {
- entry->reserved = false;
- entry->put_count = 0;
- entry->removed = false;
- }
-
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
-retry:
+
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- /* already slowpath reserved? */
- if (entry->reserved)
+ ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
+ ticket);
+ if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ __ttm_bo_unreserve(bo);
+
+ ret = -EBUSY;
+ }
+
+ if (!ret)
continue;
- ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
- ticket);
+ /* uh oh, we lost out, drop every reservation and try
+ * to only reserve this buffer, then start over if
+ * this succeeds.
+ */
+ ttm_eu_backoff_reservation_reverse(list, entry);
- if (ret == -EDEADLK) {
- /* uh oh, we lost out, drop every reservation and try
- * to only reserve this buffer, then start over if
- * this succeeds.
- */
- BUG_ON(ticket == NULL);
- spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
+ if (ret == -EDEADLK && intr) {
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
ticket);
- if (unlikely(ret != 0)) {
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- goto err_fini;
- }
+ } else if (ret == -EDEADLK) {
+ ww_mutex_lock_slow(&bo->resv->lock, ticket);
+ ret = 0;
+ }
- entry->reserved = true;
- if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ret = -EBUSY;
- goto err;
+ if (unlikely(ret != 0)) {
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ if (ticket) {
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
}
- goto retry;
- } else if (ret)
- goto err;
-
- entry->reserved = true;
- if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ret = -EBUSY;
- goto err;
+ return ret;
}
+
+ /* move this item to the front of the list, so the loop
+ * restarts iteration correctly without having to track which
+ * entries were already reserved
+ */
+ list_del(&entry->head);
+ list_add(&entry->head, list);
}
if (ticket)
@@ -182,25 +158,12 @@ retry:
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
return 0;
-
-err:
- spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
-err_fini:
- if (ticket) {
- ww_acquire_done(ticket);
- ww_acquire_fini(ticket);
- }
- return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
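The reserve loop above is a direct use of the ww_mutex wound/wait protocol:
-EDEADLK means this context was wounded, so every reservation taken so far
is dropped and the contended lock is acquired in the slow path before the
loop restarts. The core shape, as a sketch with hypothetical sketch_* names
and error paths trimmed:

/* Sketch: take one lock under a ww_acquire ticket, handling wounding. */
static int sketch_reserve_one(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ticket, bool intr)
{
	int ret = intr ? ww_mutex_lock_interruptible(lock, ticket)
		       : ww_mutex_lock(lock, ticket);

	if (ret != -EDEADLK)
		return ret;

	/* wounded: the caller must back off all other locks first, then
	 * block on the contended one in the slow path and start over.
	 */
	if (intr) {
		ret = ww_mutex_lock_slow_interruptible(lock, ticket);
		return ret == -EINTR ? -ERESTARTSYS : ret;
	}
	ww_mutex_lock_slow(lock, ticket);
	return 0;
}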
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, void *sync_obj)
+ struct list_head *list, struct fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
@@ -217,24 +180,15 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
glob = bo->glob;
spin_lock(&glob->lru_lock);
- spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
- entry->old_sync_obj = bo->sync_obj;
- bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ reservation_object_add_excl_fence(bo->resv, fence);
ttm_bo_add_to_lru(bo);
__ttm_bo_unreserve(bo);
- entry->reserved = false;
}
- spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
if (ticket)
ww_acquire_fini(ticket);
-
- list_for_each_entry(entry, list, head) {
- if (entry->old_sync_obj)
- driver->sync_obj_unref(&entry->old_sync_obj);
- }
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index d2a053352789..12c87110db3a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -695,7 +695,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
}
dma_buf = dma_buf_export(prime, &tdev->ops,
- prime->size, flags);
+ prime->size, flags, NULL);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
ttm_mem_global_free(tdev->mem_glob,
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index ca65df144765..c96db433f8af 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -848,6 +848,7 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
if (count) {
d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
ttm->pages[index] = d_page->p;
+ ttm_dma->cpu_address[index] = d_page->vaddr;
ttm_dma->dma_address[index] = d_page->dma;
list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
r = 0;
@@ -979,6 +980,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
+ ttm_dma->cpu_address[i] = 0;
ttm_dma->dma_address[i] = 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 75f319090043..bf080abc86d1 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -55,9 +55,12 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
- ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
- sizeof(*ttm->dma_address));
+ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+ sizeof(*ttm->ttm.pages) +
+ sizeof(*ttm->dma_address) +
+ sizeof(*ttm->cpu_address));
+ ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+ ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}
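One drm_calloc_large call now backs all three per-page arrays; the cpu and
dma address arrays are carved out of the tail of the same block, so the
single drm_free_large of ttm->pages in ttm_dma_tt_fini releases everything.
The layout arithmetic, as a sketch (sketch_* is hypothetical):

/* Sketch: carve three per-page arrays out of one zeroed allocation.
 * Layout: [ struct page *[n] | void *[n] | dma_addr_t[n] ]
 */
static int sketch_alloc_directory(unsigned long n, struct page ***pages,
				  void ***cpu_address,
				  dma_addr_t **dma_address)
{
	*pages = drm_calloc_large(n, sizeof(**pages) +
				     sizeof(**cpu_address) +
				     sizeof(**dma_address));
	if (!*pages)
		return -ENOMEM;
	*cpu_address = (void **)(*pages + n);
	*dma_address = (dma_addr_t *)(*cpu_address + n);
	return 0;
}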
#ifdef CONFIG_X86
@@ -228,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&ttm_dma->pages_list);
ttm_dma_tt_alloc_page_directory(ttm_dma);
- if (!ttm->pages || !ttm_dma->dma_address) {
+ if (!ttm->pages) {
ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
@@ -243,7 +246,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
drm_free_large(ttm->pages);
ttm->pages = NULL;
- drm_free_large(ttm_dma->dma_address);
+ ttm_dma->cpu_address = NULL;
ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index f02528686cd5..613ab0622d6e 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -1,8 +1,9 @@
config DRM_UDL
tristate "DisplayLink"
depends on DRM
+ depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
- select DRM_USB
+ select USB
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index e026a9e2942a..0110d95522f3 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -34,8 +34,8 @@ static u8 *udl_get_edid(struct udl_device *udl)
goto error;
for (i = 0; i < EDID_LENGTH; i++) {
- ret = usb_control_msg(udl->ddev->usbdev,
- usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
+ ret = usb_control_msg(udl->udev,
+ usb_rcvctrlpipe(udl->udev, 0), (0x02),
(0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
HZ);
if (ret < 1) {
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 3ddd6cd98ac1..8607e9e513db 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -7,48 +7,13 @@
*/
#include <linux/module.h>
-#include <drm/drm_usb.h>
+#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
-static struct drm_driver driver;
-
-/*
- * There are many DisplayLink-based graphics products, all with unique PIDs.
- * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
- * We also require a match on SubClass (0x00) and Protocol (0x00),
- * which is compatible with all known USB 2.0 era graphics chips and firmware,
- * but allows DisplayLink to increment those for any future incompatible chips
- */
-static struct usb_device_id id_table[] = {
- {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0x00,
- .bInterfaceProtocol = 0x00,
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS |
- USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
- {},
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-MODULE_LICENSE("GPL");
-
-static int udl_usb_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
+static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m)
{
- return drm_get_usb_dev(interface, id, &driver);
-}
-
-static void udl_usb_disconnect(struct usb_interface *interface)
-{
- struct drm_device *dev = usb_get_intfdata(interface);
-
- drm_kms_helper_poll_disable(dev);
- drm_connector_unplug_all(dev);
- udl_fbdev_unplug(dev);
- udl_drop_usb(dev);
- drm_unplug_dev(dev);
+ return 0;
}
static const struct vm_operations_struct udl_gem_vm_ops = {
@@ -75,6 +40,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
+ .set_busid = udl_driver_set_busid,
/* gem hooks */
.gem_free_object = udl_gem_free_object,
@@ -96,6 +62,61 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static int udl_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct drm_device *dev;
+ int r;
+
+ dev = drm_dev_alloc(&driver, &interface->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ r = drm_dev_register(dev, (unsigned long)udev);
+ if (r)
+ goto err_free;
+
+ usb_set_intfdata(interface, dev);
+ DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
+
+ return 0;
+
+err_free:
+ drm_dev_unref(dev);
+ return r;
+}
+
+static void udl_usb_disconnect(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+
+ drm_kms_helper_poll_disable(dev);
+ drm_connector_unplug_all(dev);
+ udl_fbdev_unplug(dev);
+ udl_drop_usb(dev);
+ drm_unplug_dev(dev);
+}
+
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff).
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips
+ */
+static struct usb_device_id id_table[] = {
+ {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0x00,
+ .bInterfaceProtocol = 0x00,
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
+ {},
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
static struct usb_driver udl_driver = {
.name = "udl",
.probe = udl_usb_probe,
@@ -105,13 +126,14 @@ static struct usb_driver udl_driver = {
static int __init udl_init(void)
{
- return drm_usb_init(&driver, &udl_driver);
+ return usb_register(&udl_driver);
}
static void __exit udl_exit(void)
{
- drm_usb_exit(&driver, &udl_driver);
+ usb_deregister(&udl_driver);
}
module_init(udl_init);
module_exit(udl_exit);
+MODULE_LICENSE("GPL");
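With drm_usb gone, the interface's usb_device has to reach ->load by hand:
probe smuggles it through the unsigned long flags argument of
drm_dev_register() and udl_driver_load casts it back. Both halves condensed
into a sketch (sketch_* names are hypothetical; driver is the drm_driver
declared above):

/* Sketch: hand the usb_device from probe to the ->load callback. */
static int sketch_probe(struct usb_interface *interface,
			const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct drm_device *dev = drm_dev_alloc(&driver, &interface->dev);

	if (!dev)
		return -ENOMEM;
	/* flags is just an unsigned long, so it doubles as a cookie */
	return drm_dev_register(dev, (unsigned long)udev);
}

static int sketch_load(struct drm_device *dev, unsigned long flags)
{
	struct usb_device *udev = (struct usb_device *)flags;

	dev->dev_private = udev; /* the real driver wraps this in udl_device */
	return 0;
}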
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1fbf7b357f16..51e10ee77f39 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -47,6 +47,7 @@ struct udl_fbdev;
struct udl_device {
struct device *dev;
struct drm_device *ddev;
+ struct usb_device *udev;
int sku_pixel_limit;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 42795674bc07..33dbfb2c4748 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -202,7 +202,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
@@ -211,7 +211,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
/* urb->transfer_buffer_length set to actual before submit */
- usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1),
+ usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
buf, size, udl_urb_completion, unode);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -282,6 +282,7 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
+ struct usb_device *udev = (void *)flags;
struct udl_device *udl;
int ret = -ENOMEM;
@@ -290,10 +291,11 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
if (!udl)
return -ENOMEM;
+ udl->udev = udev;
udl->ddev = dev;
dev->dev_private = udl;
- if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
+ if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
ret = -ENODEV;
DRM_ERROR("firmware not recognized. Assume incompatible device\n");
goto err;
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 50abc2adfaee..c16ffa63ded6 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -79,6 +79,7 @@ static struct drm_driver driver = {
.open = via_driver_open,
.preclose = via_reclaim_buffers_locked,
.postclose = via_driver_postclose,
+ .set_busid = drm_pci_set_busid,
.context_dtor = via_final_context,
.get_vblank_counter = via_get_vblank_counter,
.enable_vblank = via_enable_vblank,
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index d0ab3fb32acd..67e70e955504 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -31,7 +31,7 @@ static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
DRM_DEBUG("\n");
- dev_priv->sarea = drm_getsarea(dev);
+ dev_priv->sarea = drm_legacy_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index d70b1e1544bf..4f20742e7788 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -211,12 +211,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
if (!(file->minor->master && file->master->lock.hw_lock))
return;
- drm_idlelock_take(&file->master->lock);
+ drm_legacy_idlelock_take(&file->master->lock);
mutex_lock(&dev->struct_mutex);
if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
- drm_idlelock_release(&file->master->lock);
+ drm_legacy_idlelock_release(&file->master->lock);
return;
}
@@ -231,7 +231,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
}
mutex_unlock(&dev->struct_mutex);
- drm_idlelock_release(&file->master->lock);
+ drm_legacy_idlelock_release(&file->master->lock);
return;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 6327cfc36805..cff2bf9db9d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,66 +30,101 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
-static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
- TTM_PL_FLAG_CACHED;
-
-static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
- TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT;
+static struct ttm_place vram_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
-static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
- TTM_PL_FLAG_CACHED;
+static struct ttm_place vram_ne_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
-static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
- TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT;
+static struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+};
-static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
- TTM_PL_FLAG_CACHED;
+static struct ttm_place sys_ne_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
-static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
- TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT;
+static struct ttm_place gmr_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
-static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
- TTM_PL_FLAG_CACHED;
+static struct ttm_place gmr_ne_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
-struct ttm_placement vmw_vram_placement = {
+static struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
+ .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_placement_flags
};
-static uint32_t vram_gmr_placement_flags[] = {
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+static struct ttm_place vram_gmr_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ }
};
-static uint32_t gmr_vram_placement_flags[] = {
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+static struct ttm_place gmr_vram_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ }
};
struct ttm_placement vmw_vram_gmr_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 2,
.placement = vram_gmr_placement_flags,
.num_busy_placement = 1,
.busy_placement = &gmr_placement_flags
};
-static uint32_t vram_gmr_ne_placement_flags[] = {
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+static struct ttm_place vram_gmr_ne_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT
+ }
};
struct ttm_placement vmw_vram_gmr_ne_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 2,
.placement = vram_gmr_ne_placement_flags,
.num_busy_placement = 1,
@@ -97,8 +132,6 @@ struct ttm_placement vmw_vram_gmr_ne_placement = {
};
struct ttm_placement vmw_vram_sys_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
@@ -106,8 +139,6 @@ struct ttm_placement vmw_vram_sys_placement = {
};
struct ttm_placement vmw_vram_ne_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 1,
.placement = &vram_ne_placement_flags,
.num_busy_placement = 1,
@@ -115,8 +146,6 @@ struct ttm_placement vmw_vram_ne_placement = {
};
struct ttm_placement vmw_sys_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 1,
.placement = &sys_placement_flags,
.num_busy_placement = 1,
@@ -124,24 +153,33 @@ struct ttm_placement vmw_sys_placement = {
};
struct ttm_placement vmw_sys_ne_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 1,
.placement = &sys_ne_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_ne_placement_flags
};
-static uint32_t evictable_placement_flags[] = {
- TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
- VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+static struct ttm_place evictable_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+ }
};
struct ttm_placement vmw_evictable_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 4,
.placement = evictable_placement_flags,
.num_busy_placement = 1,
@@ -149,8 +187,6 @@ struct ttm_placement vmw_evictable_placement = {
};
struct ttm_placement vmw_srf_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 1,
.num_busy_placement = 2,
.placement = &gmr_placement_flags,
@@ -158,8 +194,6 @@ struct ttm_placement vmw_srf_placement = {
};
struct ttm_placement vmw_mob_placement = {
- .fpfn = 0,
- .lpfn = 0,
.num_placement = 1,
.num_busy_placement = 1,
.placement = &mob_placement_flags,
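The mechanical bulk of this vmwgfx hunk is the new placement shape: the
fpfn/lpfn range moved off struct ttm_placement and into each struct
ttm_place, so one placement list can mix heaps with different windows. A
minimal sketch of the resulting two-level layout (example_* names are ours):

/* Sketch: a one-entry placement in the per-place layout. */
static struct ttm_place example_place = {
	.fpfn = 0,
	.lpfn = 0,	/* 0 means no upper page-frame bound */
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
};

static struct ttm_placement example_placement = {
	.num_placement = 1,
	.placement = &example_place,
	.num_busy_placement = 1,
	.busy_placement = &example_place,
};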
@@ -768,44 +802,6 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
/**
- * FIXME: We're using the old vmware polling method to sync.
- * Do this with fences instead.
- */
-
-static void *vmw_sync_obj_ref(void *sync_obj)
-{
-
- return (void *)
- vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
-}
-
-static void vmw_sync_obj_unref(void **sync_obj)
-{
- vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
-}
-
-static int vmw_sync_obj_flush(void *sync_obj)
-{
- vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
- return 0;
-}
-
-static bool vmw_sync_obj_signaled(void *sync_obj)
-{
- return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
- DRM_VMW_FENCE_FLAG_EXEC);
-
-}
-
-static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
-{
- return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
- DRM_VMW_FENCE_FLAG_EXEC,
- lazy, interruptible,
- VMW_FENCE_WAIT_TIMEOUT);
-}
-
-/**
* vmw_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
@@ -829,11 +825,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
-
- spin_lock(&bdev->fence_lock);
ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
}
@@ -846,11 +838,6 @@ struct ttm_bo_driver vmw_bo_driver = {
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
- .sync_obj_signaled = vmw_sync_obj_signaled,
- .sync_obj_wait = vmw_sync_obj_wait,
- .sync_obj_flush = vmw_sync_obj_flush,
- .sync_obj_unref = vmw_sync_obj_unref,
- .sync_obj_ref = vmw_sync_obj_ref,
.move_notify = vmw_move_notify,
.swap_notify = vmw_swap_notify,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index ed1d51006ab1..914b375763dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -198,13 +198,19 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
{
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
+ struct ttm_place place;
int ret = 0;
if (pin)
- placement = vmw_vram_ne_placement;
+ place = vmw_vram_ne_placement.placement[0];
else
- placement = vmw_vram_placement;
- placement.lpfn = bo->num_pages;
+ place = vmw_vram_placement.placement[0];
+ place.lpfn = bo->num_pages;
+
+ placement.num_placement = 1;
+ placement.placement = &place;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &place;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
@@ -293,21 +299,23 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
*/
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
- uint32_t pl_flags;
+ struct ttm_place pl;
struct ttm_placement placement;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
lockdep_assert_held(&bo->resv->lock.base);
- pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+ pl.fpfn = 0;
+ pl.lpfn = 0;
+ pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
if (pin)
- pl_flags |= TTM_PL_FLAG_NO_EVICT;
+ pl.flags |= TTM_PL_FLAG_NO_EVICT;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
- placement.placement = &pl_flags;
+ placement.placement = &pl;
ret = ttm_bo_validate(bo, &placement, false, true);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 63c4d6f0281e..7197af157313 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -990,7 +990,7 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
if (unlikely(ret != 0))
return ERR_PTR(-ERESTARTSYS);
- if (drm_is_master(file_priv)) {
+ if (file_priv->is_master) {
mutex_unlock(&dev->master_mutex);
return NULL;
}
@@ -1418,6 +1418,7 @@ static struct drm_driver driver = {
.open = vmw_driver_open,
.preclose = vmw_preclose,
.postclose = vmw_postclose,
+ .set_busid = drm_pci_set_busid,
.dumb_create = vmw_dumb_create,
.dumb_map_offset = vmw_dumb_map_offset,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index c1811750cc8d..4ee799b43d5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -169,8 +169,8 @@ struct vmw_surface {
struct vmw_marker_queue {
struct list_head head;
- struct timespec lag;
- struct timespec lag_time;
+ u64 lag;
+ u64 lag_time;
spinlock_t lock;
};
@@ -342,7 +342,6 @@ struct vmw_sw_context{
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
- uint32_t fence_flags;
struct ttm_buffer_object *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
@@ -704,6 +703,7 @@ extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
+extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7bfdaa163a33..0ceaddc8e4f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -346,13 +346,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
- val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
}
- sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
-
if (p_val_node)
*p_val_node = val_node;
@@ -2338,13 +2335,9 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
if (p_handle != NULL)
ret = vmw_user_fence_create(file_priv, dev_priv->fman,
- sequence,
- DRM_VMW_FENCE_FLAG_EXEC,
- p_fence, p_handle);
+ sequence, p_fence, p_handle);
else
- ret = vmw_fence_create(dev_priv->fman, sequence,
- DRM_VMW_FENCE_FLAG_EXEC,
- p_fence);
+ ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
if (unlikely(ret != 0 && !synced)) {
(void) vmw_fallback_wait(dev_priv, false, false,
@@ -2396,7 +2389,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
BUG_ON(fence == NULL);
fence_rep.handle = fence_handle;
- fence_rep.seqno = fence->seqno;
+ fence_rep.seqno = fence->base.seqno;
vmw_update_seqno(dev_priv, &dev_priv->fifo);
fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
@@ -2417,8 +2410,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
ttm_ref_object_base_unref(vmw_fp->tfile,
fence_handle, TTM_REF_USAGE);
DRM_ERROR("Fence copy error. Syncing.\n");
- (void) vmw_fence_obj_wait(fence, fence->signal_mask,
- false, false,
+ (void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
}
@@ -2470,7 +2462,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
- sw_context->fence_flags = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
@@ -2496,7 +2487,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
+ ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
if (unlikely(ret != 0))
goto out_err;
@@ -2684,10 +2675,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
list_add_tail(&query_val.head, &validate_list);
- do {
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
- } while (ret == -ERESTARTSYS);
-
+ ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b031b48dbb3c..0a474f391fad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -374,10 +374,16 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
- struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
+ struct ttm_placement ne_placement;
int ret;
- ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ne_placement.num_placement = 1;
+ ne_placement.placement = &ne_place;
+ ne_placement.num_busy_placement = 1;
+ ne_placement.busy_placement = &ne_place;
+
+ ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 436b013b4231..197164fd7803 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work;
+ struct work_struct work, ping_work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
@@ -46,6 +46,7 @@ struct vmw_fence_manager {
bool goal_irq_on; /* Protected by @goal_irq_mutex */
bool seqno_valid; /* Protected by @lock, and may not be set to true
without the @goal_irq_mutex held. */
+ unsigned ctx;
};
struct vmw_user_fence {
@@ -80,6 +81,12 @@ struct vmw_event_fence_action {
uint32_t *tv_usec;
};
+static struct vmw_fence_manager *
+fman_from_fence(struct vmw_fence_obj *fence)
+{
+ return container_of(fence->base.lock, struct vmw_fence_manager, lock);
+}
+
/**
* Note on fencing subsystem usage of irqs:
* Typically the vmw_fences_update function is called
@@ -102,25 +109,143 @@ struct vmw_event_fence_action {
* objects with actions attached to them.
*/
-static void vmw_fence_obj_destroy_locked(struct kref *kref)
+static void vmw_fence_obj_destroy(struct fence *f)
{
struct vmw_fence_obj *fence =
- container_of(kref, struct vmw_fence_obj, kref);
+ container_of(f, struct vmw_fence_obj, base);
- struct vmw_fence_manager *fman = fence->fman;
- unsigned int num_fences;
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
+ unsigned long irq_flags;
+ spin_lock_irqsave(&fman->lock, irq_flags);
list_del_init(&fence->head);
- num_fences = --fman->num_fence_objects;
- spin_unlock_irq(&fman->lock);
- if (fence->destroy)
- fence->destroy(fence);
- else
- kfree(fence);
+ --fman->num_fence_objects;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+ fence->destroy(fence);
+}
- spin_lock_irq(&fman->lock);
+static const char *vmw_fence_get_driver_name(struct fence *f)
+{
+ return "vmwgfx";
+}
+
+static const char *vmw_fence_get_timeline_name(struct fence *f)
+{
+ return "svga";
+}
+
+static void vmw_fence_ping_func(struct work_struct *work)
+{
+ struct vmw_fence_manager *fman =
+ container_of(work, struct vmw_fence_manager, ping_work);
+
+ vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
+}
+
+static bool vmw_fence_enable_signaling(struct fence *f)
+{
+ struct vmw_fence_obj *fence =
+ container_of(f, struct vmw_fence_obj, base);
+
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
+ struct vmw_private *dev_priv = fman->dev_priv;
+
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+ return false;
+
+ if (mutex_trylock(&dev_priv->hw_mutex)) {
+ vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
+ mutex_unlock(&dev_priv->hw_mutex);
+ } else
+ schedule_work(&fman->ping_work);
+
+ return true;
+}
+
+struct vmwgfx_wait_cb {
+ struct fence_cb base;
+ struct task_struct *task;
+};
+
+static void
+vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+ struct vmwgfx_wait_cb *wait =
+ container_of(cb, struct vmwgfx_wait_cb, base);
+
+ wake_up_process(wait->task);
+}
+
+static void __vmw_fences_update(struct vmw_fence_manager *fman);
+
+static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+{
+ struct vmw_fence_obj *fence =
+ container_of(f, struct vmw_fence_obj, base);
+
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
+ struct vmw_private *dev_priv = fman->dev_priv;
+ struct vmwgfx_wait_cb cb;
+ long ret = timeout;
+ unsigned long irq_flags;
+
+ if (likely(vmw_fence_obj_signaled(fence)))
+ return timeout;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ vmw_seqno_waiter_add(dev_priv);
+
+ spin_lock_irqsave(f->lock, irq_flags);
+
+ if (intr && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ cb.base.func = vmwgfx_wait_cb;
+ cb.task = current;
+ list_add(&cb.base.node, &f->cb_list);
+
+ while (ret > 0) {
+ __vmw_fences_update(fman);
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ break;
+
+ if (intr)
+ __set_current_state(TASK_INTERRUPTIBLE);
+ else
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock_irqrestore(f->lock, irq_flags);
+
+ ret = schedule_timeout(ret);
+
+ spin_lock_irqsave(f->lock, irq_flags);
+ if (ret > 0 && intr && signal_pending(current))
+ ret = -ERESTARTSYS;
+ }
+
+ if (!list_empty(&cb.base.node))
+ list_del(&cb.base.node);
+ __set_current_state(TASK_RUNNING);
+
+out:
+ spin_unlock_irqrestore(f->lock, irq_flags);
+
+ vmw_seqno_waiter_remove(dev_priv);
+
+ return ret;
}
+static struct fence_ops vmw_fence_ops = {
+ .get_driver_name = vmw_fence_get_driver_name,
+ .get_timeline_name = vmw_fence_get_timeline_name,
+ .enable_signaling = vmw_fence_enable_signaling,
+ .wait = vmw_fence_wait,
+ .release = vmw_fence_obj_destroy,
+};
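The ops table above is the whole driver-side fence contract, and its
enable_signaling hook decides "already passed?" with the usual wrap-safe
u32 comparison. A worked example of that test (sketch_* is hypothetical):

/* Sketch: the wrap-safe seqno test used by vmw_fence_enable_signaling(). */
static bool sketch_seqno_passed(u32 hw_seqno, u32 fence_seqno)
{
	/* Works across wraparound: with hw = 0x00000002 and
	 * fence = 0xfffffffe, the u32 subtraction yields 4, well below
	 * VMW_FENCE_WRAP, so the fence counts as passed.
	 */
	return hw_seqno - fence_seqno < VMW_FENCE_WRAP;
}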
+
/**
* Execute signal actions on fences recently signaled.
@@ -180,12 +305,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
+ INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
+ fman->ctx = fence_context_alloc(1);
return fman;
}
@@ -196,6 +323,7 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty;
(void) cancel_work_sync(&fman->work);
+ (void) cancel_work_sync(&fman->ping_work);
spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
@@ -207,23 +335,16 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
}
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
- struct vmw_fence_obj *fence,
- u32 seqno,
- uint32_t mask,
+ struct vmw_fence_obj *fence, u32 seqno,
void (*destroy) (struct vmw_fence_obj *fence))
{
unsigned long irq_flags;
- unsigned int num_fences;
int ret = 0;
- fence->seqno = seqno;
+ fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+ fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
- fence->fman = fman;
- fence->signaled = 0;
- fence->signal_mask = mask;
- kref_init(&fence->kref);
fence->destroy = destroy;
- init_waitqueue_head(&fence->queue);
spin_lock_irqsave(&fman->lock, irq_flags);
if (unlikely(fman->fifo_down)) {
@@ -231,7 +352,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
goto out_unlock;
}
list_add_tail(&fence->head, &fman->fence_list);
- num_fences = ++fman->num_fence_objects;
+ ++fman->num_fence_objects;
out_unlock:
spin_unlock_irqrestore(&fman->lock, irq_flags);
@@ -239,38 +360,6 @@ out_unlock:
}
-struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
-{
- if (unlikely(fence == NULL))
- return NULL;
-
- kref_get(&fence->kref);
- return fence;
-}
-
-/**
- * vmw_fence_obj_unreference
- *
- * Note that this function may not be entered with disabled irqs since
- * it may re-enable them in the destroy function.
- *
- */
-void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
-{
- struct vmw_fence_obj *fence = *fence_p;
- struct vmw_fence_manager *fman;
-
- if (unlikely(fence == NULL))
- return;
-
- fman = fence->fman;
- *fence_p = NULL;
- spin_lock_irq(&fman->lock);
- BUG_ON(atomic_read(&fence->kref.refcount) == 0);
- kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
- spin_unlock_irq(&fman->lock);
-}
-
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
struct list_head *list)
{
@@ -326,7 +415,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
- iowrite32(fence->seqno,
+ iowrite32(fence->base.seqno,
fifo_mem + SVGA_FIFO_FENCE_GOAL);
break;
}
@@ -353,27 +442,27 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
*/
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
__le32 __iomem *fifo_mem;
- if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
+ if (fence_is_signaled_locked(&fence->base))
return false;
- fifo_mem = fence->fman->dev_priv->mmio_virt;
+ fifo_mem = fman->dev_priv->mmio_virt;
goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
- if (likely(fence->fman->seqno_valid &&
- goal_seqno - fence->seqno < VMW_FENCE_WRAP))
+ if (likely(fman->seqno_valid &&
+ goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;
- iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
- fence->fman->seqno_valid = true;
+ iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ fman->seqno_valid = true;
return true;
}
-void vmw_fences_update(struct vmw_fence_manager *fman)
+static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
- unsigned long flags;
struct vmw_fence_obj *fence, *next_fence;
struct list_head action_list;
bool needs_rerun;
@@ -382,32 +471,25 @@ void vmw_fences_update(struct vmw_fence_manager *fman)
seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
- spin_lock_irqsave(&fman->lock, flags);
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
- if (seqno - fence->seqno < VMW_FENCE_WRAP) {
+ if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
- fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+ fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
vmw_fences_perform_actions(fman, &action_list);
- wake_up_all(&fence->queue);
} else
break;
}
- needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
-
- if (!list_empty(&fman->cleanup_list))
- (void) schedule_work(&fman->work);
- spin_unlock_irqrestore(&fman->lock, flags);
-
/*
* Rerun if the fence goal seqno was updated, and the
* hardware might have raced with that update, so that
* we missed a fence_goal irq.
*/
+ needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
@@ -415,79 +497,58 @@ rerun:
goto rerun;
}
}
+
+ if (!list_empty(&fman->cleanup_list))
+ (void) schedule_work(&fman->work);
}
-bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
- uint32_t flags)
+void vmw_fences_update(struct vmw_fence_manager *fman)
{
- struct vmw_fence_manager *fman = fence->fman;
unsigned long irq_flags;
- uint32_t signaled;
spin_lock_irqsave(&fman->lock, irq_flags);
- signaled = fence->signaled;
+ __vmw_fences_update(fman);
spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
- flags &= fence->signal_mask;
- if ((signaled & flags) == flags)
- return 1;
+bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
+{
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
- if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
- vmw_fences_update(fman);
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ return 1;
- spin_lock_irqsave(&fman->lock, irq_flags);
- signaled = fence->signaled;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ vmw_fences_update(fman);
- return ((signaled & flags) == flags);
+ return fence_is_signaled(&fence->base);
}
-int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
- uint32_t flags, bool lazy,
+int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
- struct vmw_private *dev_priv = fence->fman->dev_priv;
- long ret;
+ long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
- if (likely(vmw_fence_obj_signaled(fence, flags)))
+ if (likely(ret > 0))
return 0;
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- vmw_seqno_waiter_add(dev_priv);
-
- if (interruptible)
- ret = wait_event_interruptible_timeout
- (fence->queue,
- vmw_fence_obj_signaled(fence, flags),
- timeout);
+ else if (ret == 0)
+ return -EBUSY;
else
- ret = wait_event_timeout
- (fence->queue,
- vmw_fence_obj_signaled(fence, flags),
- timeout);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- if (unlikely(ret == 0))
- ret = -EBUSY;
- else if (likely(ret > 0))
- ret = 0;
-
- return ret;
+ return ret;
}
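
The rewritten wait leans on the fence_wait_timeout() return convention: a positive value is the remaining timeout (success), zero means the wait timed out, and a negative value is an error such as -ERESTARTSYS. A small userspace sketch of the same mapping, with plain numbers standing in for kernel jiffies and error codes:

#include <errno.h>
#include <stdio.h>

/* Map a wait_*_timeout-style result (remaining time, 0, or -errno)
 * onto the 0 / -EBUSY / -errno convention used above. Illustration
 * only, detached from the fence API. */
static int map_wait_result(long ret)
{
	if (ret > 0)
		return 0;	/* signaled before the timeout elapsed */
	if (ret == 0)
		return -EBUSY;	/* timed out */
	return (int)ret;	/* interrupted or other error */
}

int main(void)
{
	printf("%d %d %d\n", map_wait_result(25),
	       map_wait_result(0), map_wait_result(-512));
	return 0;
}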
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
- struct vmw_private *dev_priv = fence->fman->dev_priv;
+ struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- struct vmw_fence_manager *fman = fence->fman;
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
+
+ fence_free(&fence->base);
- kfree(fence);
/*
* Free kernel space accounting.
*/
@@ -497,7 +558,6 @@ static void vmw_fence_destroy(struct vmw_fence_obj *fence)
int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
- uint32_t mask,
struct vmw_fence_obj **p_fence)
{
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
@@ -515,7 +575,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
goto out_no_object;
}
- ret = vmw_fence_obj_init(fman, fence, seqno, mask,
+ ret = vmw_fence_obj_init(fman, fence, seqno,
vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
@@ -535,7 +595,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
struct vmw_user_fence *ufence =
container_of(fence, struct vmw_user_fence, fence);
- struct vmw_fence_manager *fman = fence->fman;
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
ttm_base_object_kfree(ufence, base);
/*
@@ -559,7 +619,6 @@ static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_manager *fman,
uint32_t seqno,
- uint32_t mask,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle)
{
@@ -586,7 +645,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
}
ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
- mask, vmw_user_fence_destroy);
+ vmw_user_fence_destroy);
if (unlikely(ret != 0)) {
kfree(ufence);
goto out_no_object;
@@ -629,7 +688,6 @@ out_no_object:
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
struct list_head action_list;
int ret;
@@ -638,35 +696,32 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
* restart when we've released the fman->lock.
*/
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock_irq(&fman->lock);
fman->fifo_down = true;
while (!list_empty(&fman->fence_list)) {
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
- kref_get(&fence->kref);
+ fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
- ret = vmw_fence_obj_wait(fence, fence->signal_mask,
- false, false,
+ ret = vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
- fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+ fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
vmw_fences_perform_actions(fman, &action_list);
- wake_up_all(&fence->queue);
}
- spin_lock_irq(&fman->lock);
-
BUG_ON(!list_empty(&fence->head));
- kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+ fence_put(&fence->base);
+ spin_lock_irq(&fman->lock);
}
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock_irq(&fman->lock);
}
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
@@ -716,14 +771,14 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
timeout = jiffies;
if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
- ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
+ ret = ((vmw_fence_obj_signaled(fence)) ?
0 : -EBUSY);
goto out;
}
timeout = (unsigned long)arg->kernel_cookie - timeout;
- ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
+ ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
out:
ttm_base_object_unref(&base);
@@ -758,12 +813,12 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
}
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fence->fman;
+ fman = fman_from_fence(fence);
- arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
- spin_lock_irq(&fman->lock);
+ arg->signaled = vmw_fence_obj_signaled(fence);
- arg->signaled_flags = fence->signaled;
+ arg->signaled_flags = arg->flags;
+ spin_lock_irq(&fman->lock);
arg->passed_seqno = dev_priv->last_read_seqno;
spin_unlock_irq(&fman->lock);
@@ -876,7 +931,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
- struct vmw_fence_manager *fman = eaction->fence->fman;
+ struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
unsigned long irq_flags;
spin_lock_irqsave(&fman->lock, irq_flags);
@@ -900,7 +955,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
- struct vmw_fence_manager *fman = fence->fman;
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
unsigned long irq_flags;
bool run_update = false;
@@ -908,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock_irqsave(&fman->lock, irq_flags);
fman->pending_actions[action->type]++;
- if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
+ if (fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
@@ -960,7 +1015,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
bool interruptible)
{
struct vmw_event_fence_action *eaction;
- struct vmw_fence_manager *fman = fence->fman;
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
unsigned long irq_flags;
@@ -1000,7 +1055,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
bool interruptible)
{
struct vmw_event_fence_pending *event;
- struct drm_device *dev = fence->fman->dev_priv->dev;
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
+ struct drm_device *dev = fman->dev_priv->dev;
unsigned long irq_flags;
int ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index faf2e7873860..26a4add39208 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,6 +27,8 @@
#ifndef _VMWGFX_FENCE_H_
+#include <linux/fence.h>
+
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
struct vmw_private;
@@ -50,16 +52,11 @@ struct vmw_fence_action {
};
struct vmw_fence_obj {
- struct kref kref;
- u32 seqno;
+ struct fence base;
- struct vmw_fence_manager *fman;
struct list_head head;
- uint32_t signaled;
- uint32_t signal_mask;
struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
- wait_queue_head_t queue;
};
extern struct vmw_fence_manager *
@@ -67,17 +64,29 @@ vmw_fence_manager_init(struct vmw_private *dev_priv);
extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
-extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
+static inline void
+vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
+{
+ struct vmw_fence_obj *fence = *fence_p;
+
+ *fence_p = NULL;
+ if (fence)
+ fence_put(&fence->base);
+}
-extern struct vmw_fence_obj *
-vmw_fence_obj_reference(struct vmw_fence_obj *fence);
+static inline struct vmw_fence_obj *
+vmw_fence_obj_reference(struct vmw_fence_obj *fence)
+{
+ if (fence)
+ fence_get(&fence->base);
+ return fence;
+}
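
The header now open-codes the reference helpers as inline wrappers around fence_get()/fence_put(); note that the unreference helper clears the caller's pointer before dropping the reference, so no stale pointer survives the put. A standalone sketch of that pointer-clearing pattern with a toy refcount, not the vmwgfx types:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; };

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

/* Same shape as vmw_fence_obj_unreference(): NULL the caller's
 * pointer first, then drop the reference. */
static void obj_unreference(struct obj **op)
{
	struct obj *o = *op;

	*op = NULL;
	if (o)
		obj_put(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;
	obj_unreference(&o);
	printf("pointer after unreference: %p\n", (void *)o);	/* (nil) */
	return 0;
}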
extern void vmw_fences_update(struct vmw_fence_manager *fman);
-extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
- uint32_t flags);
+extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
-extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
+extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
bool lazy,
bool interruptible, unsigned long timeout);
@@ -85,13 +94,11 @@ extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
extern int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
- uint32_t mask,
struct vmw_fence_obj **p_fence);
extern int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_manager *fman,
uint32_t sequence,
- uint32_t mask,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 6ccd993e26bf..d9b4e6959750 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -160,16 +160,21 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy);
}
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- mutex_lock(&dev_priv->hw_mutex);
-
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
+}
+
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+
+ vmw_fifo_ping_host_locked(dev_priv, reason);
mutex_unlock(&dev_priv->hw_mutex);
}
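
Splitting vmw_fifo_ping_host() into a _locked worker plus a locking wrapper lets callers that already hold hw_mutex ping the host without recursive locking. A generic userspace sketch of the pattern, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: the caller must already hold hw_lock. */
static void ping_host_locked(int reason)
{
	(void)reason;	/* hardware register writes would go here */
}

/* Wrapper for callers that do not hold the lock. */
static void ping_host(int reason)
{
	pthread_mutex_lock(&hw_lock);
	ping_host_locked(reason);
	pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
	ping_host(1);
	return 0;
}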
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 26f8bdde3529..170b61be1e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -46,8 +46,7 @@ struct vmwgfx_gmrid_man {
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- uint32_t flags,
+ const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index 8a8725c2716c..efd1ffd68185 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -31,14 +31,14 @@
struct vmw_marker {
struct list_head head;
uint32_t seqno;
- struct timespec submitted;
+ u64 submitted;
};
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
INIT_LIST_HEAD(&queue->head);
- queue->lag = ns_to_timespec(0);
- getrawmonotonic(&queue->lag_time);
+ queue->lag = 0;
+ queue->lag_time = ktime_get_raw_ns();
spin_lock_init(&queue->lock);
}
@@ -62,7 +62,7 @@ int vmw_marker_push(struct vmw_marker_queue *queue,
return -ENOMEM;
marker->seqno = seqno;
- getrawmonotonic(&marker->submitted);
+ marker->submitted = ktime_get_raw_ns();
spin_lock(&queue->lock);
list_add_tail(&marker->head, &queue->head);
spin_unlock(&queue->lock);
@@ -74,14 +74,14 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
uint32_t signaled_seqno)
{
struct vmw_marker *marker, *next;
- struct timespec now;
bool updated = false;
+ u64 now;
spin_lock(&queue->lock);
- getrawmonotonic(&now);
+ now = ktime_get_raw_ns();
if (list_empty(&queue->head)) {
- queue->lag = ns_to_timespec(0);
+ queue->lag = 0;
queue->lag_time = now;
updated = true;
goto out_unlock;
@@ -91,7 +91,7 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
if (signaled_seqno - marker->seqno > (1 << 30))
continue;
- queue->lag = timespec_sub(now, marker->submitted);
+ queue->lag = now - marker->submitted;
queue->lag_time = now;
updated = true;
list_del(&marker->head);
@@ -104,27 +104,13 @@ out_unlock:
return (updated) ? 0 : -EBUSY;
}
-static struct timespec vmw_timespec_add(struct timespec t1,
- struct timespec t2)
+static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
- t1.tv_sec += t2.tv_sec;
- t1.tv_nsec += t2.tv_nsec;
- if (t1.tv_nsec >= 1000000000L) {
- t1.tv_sec += 1;
- t1.tv_nsec -= 1000000000L;
- }
-
- return t1;
-}
-
-static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
- struct timespec now;
+ u64 now;
spin_lock(&queue->lock);
- getrawmonotonic(&now);
- queue->lag = vmw_timespec_add(queue->lag,
- timespec_sub(now, queue->lag_time));
+ now = ktime_get_raw_ns();
+ queue->lag += now - queue->lag_time;
queue->lag_time = now;
spin_unlock(&queue->lock);
return queue->lag;
@@ -134,11 +120,9 @@ static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
uint32_t us)
{
- struct timespec lag, cond;
+ u64 cond = (u64) us * NSEC_PER_USEC;
- cond = ns_to_timespec((s64) us * 1000);
- lag = vmw_fifo_lag(queue);
- return (timespec_compare(&lag, &cond) < 1);
+ return vmw_fifo_lag(queue) <= cond;
}
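
With the queue lag held as a plain u64 nanosecond count, the old timespec add/subtract/compare helpers collapse into ordinary integer arithmetic. A compact sketch of the reworked bookkeeping, with ktime_get_raw_ns() replaced by a caller-supplied timestamp:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Accumulate lag as plain nanoseconds, as the reworked marker
 * queue does. Locking omitted; illustration only. */
struct lag_state {
	uint64_t lag;		/* accumulated lag, ns */
	uint64_t lag_time;	/* timestamp of last update, ns */
};

static int lag_lt(struct lag_state *s, uint64_t now, uint32_t us)
{
	s->lag += now - s->lag_time;
	s->lag_time = now;
	return s->lag <= (uint64_t)us * NSEC_PER_USEC;
}

int main(void)
{
	struct lag_state s = { .lag = 0, .lag_time = 1000 };

	printf("%d\n", lag_lt(&s, 1500, 1));	/* 500 ns lag < 1 us -> 1 */
	return 0;
}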
int vmw_wait_lag(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a432c0db257c..ff0e03b97753 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -567,13 +567,18 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
- struct ttm_bo_device *bdev = bo->bdev;
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+ long lret;
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, true,
- !!(flags & drm_vmw_synccpu_dontblock));
- spin_unlock(&bdev->fence_lock);
- return ret;
+ if (nonblock)
+ return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
+
+ lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+ if (!lret)
+ return -EBUSY;
+ else if (lret < 0)
+ return lret;
+ return 0;
}
ret = ttm_bo_synccpu_write_grab
@@ -1215,7 +1220,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(NULL, &val_list);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1419,25 +1424,16 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
- struct vmw_fence_obj *old_fence_obj;
+
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
- if (fence == NULL)
+ if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- else
- driver->sync_obj_ref(fence);
-
- spin_lock(&bdev->fence_lock);
-
- old_fence_obj = bo->sync_obj;
- bo->sync_obj = fence;
-
- spin_unlock(&bdev->fence_lock);
-
- if (old_fence_obj)
- vmw_fence_obj_unreference(&old_fence_obj);
+ reservation_object_add_excl_fence(bo->resv, &fence->base);
+ fence_put(&fence->base);
+ } else
+ reservation_object_add_excl_fence(bo->resv, &fence->base);
}
/**
@@ -1475,7 +1471,6 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB) {
struct vmw_resource *res, *n;
- struct ttm_bo_device *bdev = bo->bdev;
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
@@ -1491,9 +1486,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
list_del_init(&res->mob_head);
}
- spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
}
}
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 112f27e51bc7..63bd63f3c7df 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -185,16 +185,16 @@ static unsigned int pin_job(struct host1x_job *job)
struct sg_table *sgt;
dma_addr_t phys_addr;
- reloc->target = host1x_bo_get(reloc->target);
- if (!reloc->target)
+ reloc->target.bo = host1x_bo_get(reloc->target.bo);
+ if (!reloc->target.bo)
goto unpin;
- phys_addr = host1x_bo_pin(reloc->target, &sgt);
+ phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
if (!phys_addr)
goto unpin;
job->addr_phys[job->num_unpins] = phys_addr;
- job->unpins[job->num_unpins].bo = reloc->target;
+ job->unpins[job->num_unpins].bo = reloc->target.bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
}
@@ -235,21 +235,21 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocarray[i];
u32 reloc_addr = (job->reloc_addr_phys[i] +
- reloc->target_offset) >> reloc->shift;
+ reloc->target.offset) >> reloc->shift;
u32 *target;
/* skip all other gathers */
- if (cmdbuf != reloc->cmdbuf)
+ if (cmdbuf != reloc->cmdbuf.bo)
continue;
- if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
+ if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
if (cmdbuf_page_addr)
host1x_bo_kunmap(cmdbuf, last_page,
cmdbuf_page_addr);
cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
- reloc->cmdbuf_offset >> PAGE_SHIFT);
- last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
+ reloc->cmdbuf.offset >> PAGE_SHIFT);
+ last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;
if (unlikely(!cmdbuf_page_addr)) {
pr_err("Could not map cmdbuf for relocation\n");
@@ -257,7 +257,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
}
}
- target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
+ target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
*target = reloc_addr;
}
@@ -272,7 +272,7 @@ static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
{
offset *= sizeof(u32);
- if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
+ if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
return false;
return true;
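
These hunks assume struct host1x_reloc now pairs each buffer object with its byte offset instead of keeping flat cmdbuf/cmdbuf_offset and target/target_offset fields. A hypothetical sketch of the grouped layout, inferred purely from the accesses above; the authoritative definition lives in the host1x headers:

/* Inferred layout only; see the host1x headers for the real one. */
struct host1x_bo;

struct host1x_reloc_buf {
	struct host1x_bo *bo;	/* buffer this half of the reloc refers to */
	unsigned long offset;	/* byte offset inside that buffer */
};

struct host1x_reloc_sketch {
	struct host1x_reloc_buf cmdbuf;	/* where the patched word lives */
	struct host1x_reloc_buf target;	/* address that gets patched in */
	unsigned long shift;		/* right-shift applied to the address */
};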
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 1887972b4ac2..107ec236a4a6 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
-imx-ipu-v3-objs := ipu-common.o ipu-dc.o ipu-di.o ipu-dp.o ipu-dmfc.o ipu-smfc.o
+imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
+ ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 04e7b2eafbdd..df65d2bca522 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -44,17 +44,6 @@ static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
writel(value, ipu->cm_reg + offset);
}
-static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
-{
- return readl(ipu->idmac_reg + offset);
-}
-
-static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
- unsigned offset)
-{
- writel(value, ipu->idmac_reg + offset);
-}
-
void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
{
u32 val;
@@ -65,457 +54,184 @@ void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
-struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel)
-{
- struct ipu_soc *ipu = channel->ipu;
-
- return ipu->cpmem_base + channel->num;
-}
-EXPORT_SYMBOL_GPL(ipu_get_cpmem);
-
-void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel)
-{
- struct ipu_soc *ipu = channel->ipu;
- struct ipu_ch_param __iomem *p = ipu_get_cpmem(channel);
- u32 val;
-
- if (ipu->ipu_type == IPUV3EX)
- ipu_ch_param_write_field(p, IPU_FIELD_ID, 1);
-
- val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(channel->num));
- val |= 1 << (channel->num % 32);
- ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(channel->num));
-};
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
-
-void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v)
-{
- u32 bit = (wbs >> 8) % 160;
- u32 size = wbs & 0xff;
- u32 word = (wbs >> 8) / 160;
- u32 i = bit / 32;
- u32 ofs = bit % 32;
- u32 mask = (1 << size) - 1;
- u32 val;
-
- pr_debug("%s %d %d %d\n", __func__, word, bit , size);
-
- val = readl(&base->word[word].data[i]);
- val &= ~(mask << ofs);
- val |= v << ofs;
- writel(val, &base->word[word].data[i]);
-
- if ((bit + size - 1) / 32 > i) {
- val = readl(&base->word[word].data[i + 1]);
- val &= ~(mask >> (ofs ? (32 - ofs) : 0));
- val |= v >> (ofs ? (32 - ofs) : 0);
- writel(val, &base->word[word].data[i + 1]);
- }
-}
-EXPORT_SYMBOL_GPL(ipu_ch_param_write_field);
-
-u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs)
-{
- u32 bit = (wbs >> 8) % 160;
- u32 size = wbs & 0xff;
- u32 word = (wbs >> 8) / 160;
- u32 i = bit / 32;
- u32 ofs = bit % 32;
- u32 mask = (1 << size) - 1;
- u32 val = 0;
-
- pr_debug("%s %d %d %d\n", __func__, word, bit , size);
-
- val = (readl(&base->word[word].data[i]) >> ofs) & mask;
-
- if ((bit + size - 1) / 32 > i) {
- u32 tmp;
- tmp = readl(&base->word[word].data[i + 1]);
- tmp &= mask >> (ofs ? (32 - ofs) : 0);
- val |= tmp << (ofs ? (32 - ofs) : 0);
- }
-
- return val;
-}
-EXPORT_SYMBOL_GPL(ipu_ch_param_read_field);
-
-int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *p,
- const struct ipu_rgb *rgb)
-{
- int bpp = 0, npb = 0, ro, go, bo, to;
-
- ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
- go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
- bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
- to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
-
- ipu_ch_param_write_field(p, IPU_FIELD_WID0, rgb->red.length - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_OFS0, ro);
- ipu_ch_param_write_field(p, IPU_FIELD_WID1, rgb->green.length - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_OFS1, go);
- ipu_ch_param_write_field(p, IPU_FIELD_WID2, rgb->blue.length - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_OFS2, bo);
-
- if (rgb->transp.length) {
- ipu_ch_param_write_field(p, IPU_FIELD_WID3,
- rgb->transp.length - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_OFS3, to);
- } else {
- ipu_ch_param_write_field(p, IPU_FIELD_WID3, 7);
- ipu_ch_param_write_field(p, IPU_FIELD_OFS3,
- rgb->bits_per_pixel);
- }
-
- switch (rgb->bits_per_pixel) {
- case 32:
- bpp = 0;
- npb = 15;
- break;
- case 24:
- bpp = 1;
- npb = 19;
- break;
- case 16:
- bpp = 3;
- npb = 31;
- break;
- case 8:
- bpp = 5;
- npb = 63;
- break;
- default:
- return -EINVAL;
- }
- ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
- ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
- ipu_ch_param_write_field(p, IPU_FIELD_PFS, 7); /* rgb mode */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
-
-int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
- int width)
+enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
- int bpp = 0, npb = 0;
-
- switch (width) {
- case 32:
- bpp = 0;
- npb = 15;
- break;
- case 24:
- bpp = 1;
- npb = 19;
- break;
- case 16:
- bpp = 3;
- npb = 31;
- break;
- case 8:
- bpp = 5;
- npb = 63;
- break;
+ switch (drm_fourcc) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return IPUV3_COLORSPACE_RGB;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return IPUV3_COLORSPACE_YUV;
default:
- return -EINVAL;
+ return IPUV3_COLORSPACE_UNKNOWN;
}
-
- ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
- ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
- ipu_ch_param_write_field(p, IPU_FIELD_PFS, 6); /* raw mode */
-
- return 0;
}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
+EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
-void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p,
- u32 pixel_format)
+enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
- switch (pixel_format) {
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_UYVY:
- ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3); /* bits/pixel */
- ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0xA); /* pix format */
- ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31); /* burst size */
- break;
case V4L2_PIX_FMT_YUYV:
- ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3); /* bits/pixel */
- ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0x8); /* pix format */
- ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31); /* burst size */
- break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ return IPUV3_COLORSPACE_YUV;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB565:
+ return IPUV3_COLORSPACE_RGB;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
}
}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
+EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
-void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
- u32 pixel_format, int stride, int u_offset, int v_offset)
+bool ipu_pixelformat_is_planar(u32 pixelformat)
{
- switch (pixel_format) {
+ switch (pixelformat) {
case V4L2_PIX_FMT_YUV420:
- ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_UBO, u_offset / 8);
- ipu_ch_param_write_field(p, IPU_FIELD_VBO, v_offset / 8);
- break;
case V4L2_PIX_FMT_YVU420:
- ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_UBO, v_offset / 8);
- ipu_ch_param_write_field(p, IPU_FIELD_VBO, u_offset / 8);
- break;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ return true;
}
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
-
-void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
- int stride, int height)
-{
- int u_offset, v_offset;
- int uv_stride = 0;
- switch (pixel_format) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height / 2);
- ipu_cpmem_set_yuv_planar_full(p, pixel_format, stride,
- u_offset, v_offset);
- break;
- }
+ return false;
}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
-
-static const struct ipu_rgb def_rgb_32 = {
- .red = { .offset = 16, .length = 8, },
- .green = { .offset = 8, .length = 8, },
- .blue = { .offset = 0, .length = 8, },
- .transp = { .offset = 24, .length = 8, },
- .bits_per_pixel = 32,
-};
-
-static const struct ipu_rgb def_bgr_32 = {
- .red = { .offset = 0, .length = 8, },
- .green = { .offset = 8, .length = 8, },
- .blue = { .offset = 16, .length = 8, },
- .transp = { .offset = 24, .length = 8, },
- .bits_per_pixel = 32,
-};
-
-static const struct ipu_rgb def_rgb_24 = {
- .red = { .offset = 16, .length = 8, },
- .green = { .offset = 8, .length = 8, },
- .blue = { .offset = 0, .length = 8, },
- .transp = { .offset = 0, .length = 0, },
- .bits_per_pixel = 24,
-};
-
-static const struct ipu_rgb def_bgr_24 = {
- .red = { .offset = 0, .length = 8, },
- .green = { .offset = 8, .length = 8, },
- .blue = { .offset = 16, .length = 8, },
- .transp = { .offset = 0, .length = 0, },
- .bits_per_pixel = 24,
-};
-
-static const struct ipu_rgb def_rgb_16 = {
- .red = { .offset = 11, .length = 5, },
- .green = { .offset = 5, .length = 6, },
- .blue = { .offset = 0, .length = 5, },
- .transp = { .offset = 0, .length = 0, },
- .bits_per_pixel = 16,
-};
+EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
-static const struct ipu_rgb def_bgr_16 = {
- .red = { .offset = 0, .length = 5, },
- .green = { .offset = 5, .length = 6, },
- .blue = { .offset = 11, .length = 5, },
- .transp = { .offset = 0, .length = 0, },
- .bits_per_pixel = 16,
-};
-
-#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
-#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 4) + (x) / 2)
-#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * pix->height / 4) + \
- (pix->width * (y) / 4) + (x) / 2)
-
-int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 drm_fourcc)
+enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
{
- switch (drm_fourcc) {
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- /* pix format */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 2);
- /* burst size */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 63);
- break;
- case DRM_FORMAT_UYVY:
- /* bits/pixel */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
- /* pix format */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0xA);
- /* burst size */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
- break;
- case DRM_FORMAT_YUYV:
- /* bits/pixel */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
- /* pix format */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0x8);
- /* burst size */
- ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
- break;
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XBGR8888:
- ipu_cpmem_set_format_rgb(cpmem, &def_bgr_32);
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- ipu_cpmem_set_format_rgb(cpmem, &def_rgb_32);
- break;
- case DRM_FORMAT_BGR888:
- ipu_cpmem_set_format_rgb(cpmem, &def_bgr_24);
- break;
- case DRM_FORMAT_RGB888:
- ipu_cpmem_set_format_rgb(cpmem, &def_rgb_24);
- break;
- case DRM_FORMAT_RGB565:
- ipu_cpmem_set_format_rgb(cpmem, &def_rgb_16);
- break;
- case DRM_FORMAT_BGR565:
- ipu_cpmem_set_format_rgb(cpmem, &def_bgr_16);
- break;
+ switch (mbus_code & 0xf000) {
+ case 0x1000:
+ return IPUV3_COLORSPACE_RGB;
+ case 0x2000:
+ return IPUV3_COLORSPACE_YUV;
default:
- return -EINVAL;
+ return IPUV3_COLORSPACE_UNKNOWN;
}
-
- return 0;
}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
+EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
-/*
- * The V4L2 spec defines packed RGB formats in memory byte order, which from
- * point of view of the IPU corresponds to little-endian words with the first
- * component in the least significant bits.
- * The DRM pixel formats and IPU internal representation are ordered the other
- * way around, with the first named component ordered at the most significant
- * bits. Further, V4L2 formats are not well defined:
- * http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
- * We choose the interpretation which matches GStreamer behavior.
- */
-static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
+int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
{
switch (pixelformat) {
- case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
/*
- * Here we choose the 'corrected' interpretation of RGBP, a
- * little-endian 16-bit word with the red component at the most
- * significant bits:
- * g[2:0]b[4:0] r[4:0]g[5:3] <=> [16:0] R:G:B
+ * For the planar YUV formats, the stride passed to
+ * cpmem must be the stride in bytes of the Y plane,
+ * and all the planar YUV formats have an 8-bit
+ * Y component.
*/
- return DRM_FORMAT_RGB565;
+ return (8 * pixel_stride) >> 3;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ return (16 * pixel_stride) >> 3;
case V4L2_PIX_FMT_BGR24:
- /* B G R <=> [24:0] R:G:B */
- return DRM_FORMAT_RGB888;
case V4L2_PIX_FMT_RGB24:
- /* R G B <=> [24:0] B:G:R */
- return DRM_FORMAT_BGR888;
+ return (24 * pixel_stride) >> 3;
case V4L2_PIX_FMT_BGR32:
- /* B G R A <=> [32:0] A:B:G:R */
- return DRM_FORMAT_XRGB8888;
case V4L2_PIX_FMT_RGB32:
- /* R G B A <=> [32:0] A:B:G:R */
- return DRM_FORMAT_XBGR8888;
- case V4L2_PIX_FMT_UYVY:
- return DRM_FORMAT_UYVY;
- case V4L2_PIX_FMT_YUYV:
- return DRM_FORMAT_YUYV;
- case V4L2_PIX_FMT_YUV420:
- return DRM_FORMAT_YUV420;
- case V4L2_PIX_FMT_YVU420:
- return DRM_FORMAT_YVU420;
+ return (32 * pixel_stride) >> 3;
+ default:
+ break;
}
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
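
ipu_stride_to_bytes() is just bits-per-pixel times pixel stride divided by eight, grouped by format family. A standalone illustration of the arithmetic with a 640-pixel line:

#include <stdio.h>

/* Same arithmetic as ipu_stride_to_bytes(): bytes = bpp * pixels / 8. */
static int stride_to_bytes(unsigned int bpp, unsigned int pixel_stride)
{
	return (bpp * pixel_stride) >> 3;
}

int main(void)
{
	printf("%d\n", stride_to_bytes(8, 640));	/* planar YUV Y plane: 640 */
	printf("%d\n", stride_to_bytes(16, 640));	/* RGB565/YUYV: 1280 */
	printf("%d\n", stride_to_bytes(32, 640));	/* XRGB8888: 2560 */
	return 0;
}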
-enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
+int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
+ bool hflip, bool vflip)
{
- switch (drm_fourcc) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- return IPUV3_COLORSPACE_RGB;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- return IPUV3_COLORSPACE_YUV;
+ u32 r90, vf, hf;
+
+ switch (degrees) {
+ case 0:
+ vf = hf = r90 = 0;
+ break;
+ case 90:
+ vf = hf = 0;
+ r90 = 1;
+ break;
+ case 180:
+ vf = hf = 1;
+ r90 = 0;
+ break;
+ case 270:
+ vf = hf = r90 = 1;
+ break;
default:
- return IPUV3_COLORSPACE_UNKNOWN;
+ return -EINVAL;
}
-}
-EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
-int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
- struct ipu_image *image)
-{
- struct v4l2_pix_format *pix = &image->pix;
- int y_offset, u_offset, v_offset;
+ hf ^= (u32)hflip;
+ vf ^= (u32)vflip;
- pr_debug("%s: resolution: %dx%d stride: %d\n",
- __func__, pix->width, pix->height,
- pix->bytesperline);
+ *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
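
The rotate mode packs rotate-90 into bit 2 and the horizontal/vertical flips into bits 1 and 0, with 180 degrees expressed as both flips; any extra hflip/vflip is folded in by XOR. A self-contained sketch of the same packing, with the enum values spelled as raw numbers:

#include <assert.h>
#include <stdint.h>

/* Bit packing used by ipu_degrees_to_rot_mode(): (r90 << 2) | (hf << 1) | vf.
 * Illustration only. */
static int degrees_to_mode(int degrees, int hflip, int vflip, uint32_t *mode)
{
	uint32_t r90, hf, vf;

	switch (degrees) {
	case 0:   r90 = 0; hf = 0; vf = 0; break;
	case 90:  r90 = 1; hf = 0; vf = 0; break;
	case 180: r90 = 0; hf = 1; vf = 1; break;
	case 270: r90 = 1; hf = 1; vf = 1; break;
	default:  return -1;
	}

	hf ^= (uint32_t)hflip;
	vf ^= (uint32_t)vflip;
	*mode = (r90 << 2) | (hf << 1) | vf;
	return 0;
}

int main(void)
{
	uint32_t mode;

	assert(degrees_to_mode(180, 0, 0, &mode) == 0 && mode == 3);
	/* an extra hflip cancels one of the 180-degree flips */
	assert(degrees_to_mode(180, 1, 0, &mode) == 0 && mode == 1);
	return 0;
}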
- ipu_cpmem_set_resolution(cpmem, image->rect.width,
- image->rect.height);
- ipu_cpmem_set_stride(cpmem, pix->bytesperline);
+int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
+ bool hflip, bool vflip)
+{
+ u32 r90, vf, hf;
- ipu_cpmem_set_fmt(cpmem, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
+ r90 = ((u32)mode >> 2) & 0x1;
+ hf = ((u32)mode >> 1) & 0x1;
+ vf = ((u32)mode >> 0) & 0x1;
+ hf ^= (u32)hflip;
+ vf ^= (u32)vflip;
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- y_offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = U_OFFSET(pix, image->rect.left,
- image->rect.top) - y_offset;
- v_offset = V_OFFSET(pix, image->rect.left,
- image->rect.top) - y_offset;
-
- ipu_cpmem_set_yuv_planar_full(cpmem, pix->pixelformat,
- pix->bytesperline, u_offset, v_offset);
- ipu_cpmem_set_buffer(cpmem, 0, image->phys + y_offset);
+ switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
+ case IPU_ROTATE_NONE:
+ *degrees = 0;
break;
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_YUYV:
- ipu_cpmem_set_buffer(cpmem, 0, image->phys +
- image->rect.left * 2 +
- image->rect.top * image->pix.bytesperline);
+ case IPU_ROTATE_90_RIGHT:
+ *degrees = 90;
break;
- case V4L2_PIX_FMT_RGB32:
- case V4L2_PIX_FMT_BGR32:
- ipu_cpmem_set_buffer(cpmem, 0, image->phys +
- image->rect.left * 4 +
- image->rect.top * image->pix.bytesperline);
+ case IPU_ROTATE_180:
+ *degrees = 180;
break;
- case V4L2_PIX_FMT_RGB565:
- ipu_cpmem_set_buffer(cpmem, 0, image->phys +
- image->rect.left * 2 +
- image->rect.top * image->pix.bytesperline);
- break;
- case V4L2_PIX_FMT_RGB24:
- case V4L2_PIX_FMT_BGR24:
- ipu_cpmem_set_buffer(cpmem, 0, image->phys +
- image->rect.left * 3 +
- image->rect.top * image->pix.bytesperline);
+ case IPU_ROTATE_90_LEFT:
+ *degrees = 270;
break;
default:
return -EINVAL;
@@ -523,27 +239,7 @@ int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
return 0;
}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
-
-enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
-{
- switch (pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_YUYV:
- return IPUV3_COLORSPACE_YUV;
- case V4L2_PIX_FMT_RGB32:
- case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_RGB24:
- case V4L2_PIX_FMT_BGR24:
- case V4L2_PIX_FMT_RGB565:
- return IPUV3_COLORSPACE_RGB;
- default:
- return IPUV3_COLORSPACE_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
+EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
@@ -587,7 +283,26 @@ void ipu_idmac_put(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);
-#define idma_mask(ch) (1 << (ch & 0x1f))
+#define idma_mask(ch) (1 << ((ch) & 0x1f))
+
+/*
+ * This is an undocumented feature: writing a one to a channel bit in
+ * IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF resets the channel's
+ * internal current buffer pointer, so that transfers start from buffer
+ * 0 on the next channel enable (that's the theory anyway; the imx6 TRM
+ * only says these are read-only registers). This operation is required
+ * for channel linking to work correctly: for instance, video capture
+ * pipelines that carry out image rotations will fail after the first
+ * streaming run unless this function is called for each channel before
+ * re-enabling the channels.
+ */
+static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
+}
void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
bool doublebuffer)
@@ -605,10 +320,81 @@ void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
reg &= ~idma_mask(channel->num);
ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
+ __ipu_idmac_reset_current_buffer(channel);
+
spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
+static const struct {
+ int chnum;
+ u32 reg;
+ int shift;
+} idmac_lock_en_info[] = {
+ { .chnum = 5, .reg = IDMAC_CH_LOCK_EN_1, .shift = 0, },
+ { .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift = 2, },
+ { .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift = 4, },
+ { .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift = 6, },
+ { .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift = 8, },
+ { .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
+ { .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
+ { .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
+ { .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
+ { .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
+ { .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
+ { .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift = 0, },
+ { .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift = 2, },
+ { .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift = 4, },
+ { .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift = 6, },
+ { .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift = 8, },
+ { .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
+};
+
+int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 bursts, regval;
+ int i;
+
+ switch (num_bursts) {
+ case 0:
+ case 1:
+ bursts = 0x00; /* locking disabled */
+ break;
+ case 2:
+ bursts = 0x01;
+ break;
+ case 4:
+ bursts = 0x02;
+ break;
+ case 8:
+ bursts = 0x03;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
+ if (channel->num == idmac_lock_en_info[i].chnum)
+ break;
+ }
+ if (i >= ARRAY_SIZE(idmac_lock_en_info))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
+ regval &= ~(0x03 << idmac_lock_en_info[i].shift);
+ regval |= (bursts << idmac_lock_en_info[i].shift);
+ ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
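
The register update at the end of ipu_idmac_lock_enable() is a read-modify-write of the channel's two-bit lock field under the IPU spinlock: clear the field, then OR in the new burst encoding. A minimal sketch of just that bit manipulation, with the register I/O stubbed out:

#include <assert.h>
#include <stdint.h>

/* RMW of a 2-bit field at 'shift', mirroring the IDMAC_CH_LOCK_EN_[12]
 * update above. Illustration only. */
static uint32_t set_two_bit_field(uint32_t regval, int shift, uint32_t bursts)
{
	regval &= ~(0x03u << shift);	/* clear the channel's field */
	regval |= (bursts & 0x03u) << shift;
	return regval;
}

int main(void)
{
	/* channel field at shift 4, previously 0x3, set to 0x1 (2 bursts) */
	assert(set_two_bit_field(0x30, 4, 0x1) == 0x10);
	return 0;
}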
+
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
unsigned long lock_flags;
@@ -661,30 +447,6 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
}
EXPORT_SYMBOL_GPL(ipu_module_disable);
-int ipu_csi_enable(struct ipu_soc *ipu, int csi)
-{
- return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
-}
-EXPORT_SYMBOL_GPL(ipu_csi_enable);
-
-int ipu_csi_disable(struct ipu_soc *ipu, int csi)
-{
- return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
-}
-EXPORT_SYMBOL_GPL(ipu_csi_disable);
-
-int ipu_smfc_enable(struct ipu_soc *ipu)
-{
- return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
-}
-EXPORT_SYMBOL_GPL(ipu_smfc_enable);
-
-int ipu_smfc_disable(struct ipu_soc *ipu)
-{
- return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
-}
-EXPORT_SYMBOL_GPL(ipu_smfc_disable);
-
int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
@@ -694,6 +456,30 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
+bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 reg = 0;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+ switch (buf_num) {
+ case 0:
+ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
+ break;
+ case 1:
+ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
+ break;
+ case 2:
+ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
+ break;
+ }
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return ((reg & idma_mask(channel->num)) != 0);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
+
void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
struct ipu_soc *ipu = channel->ipu;
@@ -712,6 +498,34 @@ void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
+void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
+ switch (buf_num) {
+ case 0:
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
+ break;
+ case 1:
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
+ break;
+ case 2:
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
+ break;
+ default:
+ break;
+ }
+ ipu_cm_write(ipu, 0x0, IPU_GPR); /* back to "write one to set" mode */
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
+
int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
@@ -782,6 +596,8 @@ int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
val &= ~idma_mask(channel->num);
ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
+ __ipu_idmac_reset_current_buffer(channel);
+
/* Set channel buffers NOT to be ready */
ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
@@ -810,6 +626,31 @@ int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
+/*
+ * The imx6 rev. D TRM says that enabling the WM feature will increase
+ * a channel's priority. Refer to Table 36-8 Calculated priority value.
+ * The sub-module that is the sink or source for the channel must enable
+ * the watermark signal for this to take effect (SMFC_WM for instance).
+ */
+void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
+ if (enable)
+ val |= 1 << (channel->num % 32);
+ else
+ val &= ~(1 << (channel->num % 32));
+ ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
+
static int ipu_memory_reset(struct ipu_soc *ipu)
{
unsigned long timeout;
@@ -826,12 +667,66 @@ static int ipu_memory_reset(struct ipu_soc *ipu)
return 0;
}
+/*
+ * Set the source mux for the given CSI. Selects either parallel or
+ * MIPI CSI2 sources.
+ */
+void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
+{
+ unsigned long flags;
+ u32 val, mask;
+
+ mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
+ IPU_CONF_CSI0_DATA_SOURCE;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ if (mipi_csi2)
+ val |= mask;
+ else
+ val &= ~mask;
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
+
+/*
+ * Set the source mux for the IC. Selects either CSI[01] or the VDI.
+ */
+void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ if (vdi) {
+ val |= IPU_CONF_IC_INPUT;
+ } else {
+ val &= ~IPU_CONF_IC_INPUT;
+ if (csi_id == 1)
+ val |= IPU_CONF_CSI_SEL;
+ else
+ val &= ~IPU_CONF_CSI_SEL;
+ }
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
+
struct ipu_devtype {
const char *name;
unsigned long cm_ofs;
unsigned long cpmem_ofs;
unsigned long srm_ofs;
unsigned long tpm_ofs;
+ unsigned long csi0_ofs;
+ unsigned long csi1_ofs;
+ unsigned long ic_ofs;
unsigned long disp0_ofs;
unsigned long disp1_ofs;
unsigned long dc_tmpl_ofs;
@@ -845,6 +740,9 @@ static struct ipu_devtype ipu_type_imx51 = {
.cpmem_ofs = 0x1f000000,
.srm_ofs = 0x1f040000,
.tpm_ofs = 0x1f060000,
+ .csi0_ofs = 0x1f030000,
+ .csi1_ofs = 0x1f038000,
+ .ic_ofs = 0x1f020000,
.disp0_ofs = 0x1e040000,
.disp1_ofs = 0x1e048000,
.dc_tmpl_ofs = 0x1f080000,
@@ -858,6 +756,9 @@ static struct ipu_devtype ipu_type_imx53 = {
.cpmem_ofs = 0x07000000,
.srm_ofs = 0x07040000,
.tpm_ofs = 0x07060000,
+ .csi0_ofs = 0x07030000,
+ .csi1_ofs = 0x07038000,
+ .ic_ofs = 0x07020000,
.disp0_ofs = 0x06040000,
.disp1_ofs = 0x06048000,
.dc_tmpl_ofs = 0x07080000,
@@ -871,6 +772,9 @@ static struct ipu_devtype ipu_type_imx6q = {
.cpmem_ofs = 0x00300000,
.srm_ofs = 0x00340000,
.tpm_ofs = 0x00360000,
+ .csi0_ofs = 0x00230000,
+ .csi1_ofs = 0x00238000,
+ .ic_ofs = 0x00220000,
.disp0_ofs = 0x00240000,
.disp1_ofs = 0x00248000,
.dc_tmpl_ofs = 0x00380000,
@@ -895,8 +799,36 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
struct device *dev = &pdev->dev;
const struct ipu_devtype *devtype = ipu->devtype;
+ ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
+ if (ret) {
+ unit = "cpmem";
+ goto err_cpmem;
+ }
+
+ ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
+ IPU_CONF_CSI0_EN, ipu_clk);
+ if (ret) {
+ unit = "csi0";
+ goto err_csi_0;
+ }
+
+ ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
+ IPU_CONF_CSI1_EN, ipu_clk);
+ if (ret) {
+ unit = "csi1";
+ goto err_csi_1;
+ }
+
+ ret = ipu_ic_init(ipu, dev,
+ ipu_base + devtype->ic_ofs,
+ ipu_base + devtype->tpm_ofs);
+ if (ret) {
+ unit = "ic";
+ goto err_ic;
+ }
+
ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
- IPU_CONF_DI0_EN, ipu_clk);
+ IPU_CONF_DI0_EN, ipu_clk);
if (ret) {
unit = "di0";
goto err_di_0;
@@ -949,6 +881,14 @@ err_dc:
err_di_1:
ipu_di_exit(ipu, 0);
err_di_0:
+ ipu_ic_exit(ipu);
+err_ic:
+ ipu_csi_exit(ipu, 1);
+err_csi_1:
+ ipu_csi_exit(ipu, 0);
+err_csi_0:
+ ipu_cpmem_exit(ipu);
+err_cpmem:
dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
return ret;
}
@@ -1025,6 +965,10 @@ static void ipu_submodules_exit(struct ipu_soc *ipu)
ipu_dc_exit(ipu);
ipu_di_exit(ipu, 1);
ipu_di_exit(ipu, 0);
+ ipu_ic_exit(ipu);
+ ipu_csi_exit(ipu, 1);
+ ipu_csi_exit(ipu, 0);
+ ipu_cpmem_exit(ipu);
}
static int platform_remove_devices_fn(struct device *dev, void *unused)
@@ -1201,6 +1145,44 @@ static void ipu_irq_exit(struct ipu_soc *ipu)
irq_domain_remove(ipu->domain);
}
+void ipu_dump(struct ipu_soc *ipu)
+{
+ int i;
+
+ dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CONF));
+ dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CONF));
+ dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
+ dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
+ dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
+ dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
+ ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
+ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
+ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
+ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
+ dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
+ ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
+ for (i = 0; i < 15; i++)
+ dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
+ ipu_cm_read(ipu, IPU_INT_CTRL(i)));
+}
+EXPORT_SYMBOL_GPL(ipu_dump);
+
static int ipu_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -1243,6 +1225,12 @@ static int ipu_probe(struct platform_device *pdev)
ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
ipu_base + devtype->cpmem_ofs);
+ dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
+ ipu_base + devtype->csi0_ofs);
+ dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
+ ipu_base + devtype->csi1_ofs);
+ dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
+ ipu_base + devtype->ic_ofs);
dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
ipu_base + devtype->disp0_ofs);
dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
@@ -1265,10 +1253,8 @@ static int ipu_probe(struct platform_device *pdev)
ipu->idmac_reg = devm_ioremap(&pdev->dev,
ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
PAGE_SIZE);
- ipu->cpmem_base = devm_ioremap(&pdev->dev,
- ipu_base + devtype->cpmem_ofs, PAGE_SIZE);
- if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base)
+ if (!ipu->cm_reg || !ipu->idmac_reg)
return -ENOMEM;
ipu->clk = devm_clk_get(&pdev->dev, "bus");
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
new file mode 100644
index 000000000000..3bf05bc4ab67
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -0,0 +1,764 @@
+/*
+ * Copyright (C) 2012 Mentor Graphics Inc.
+ * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/types.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <drm/drm_fourcc.h>
+#include "ipu-prv.h"
+
+struct ipu_cpmem_word {
+ u32 data[5];
+ u32 res[3];
+};
+
+struct ipu_ch_param {
+ struct ipu_cpmem_word word[2];
+};
+
+struct ipu_cpmem {
+ struct ipu_ch_param __iomem *base;
+ u32 module;
+ spinlock_t lock;
+ int use_count;
+ struct ipu_soc *ipu;
+};
+
+#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
+
+#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22)
+#define IPU_FIELD_VBO IPU_CPMEM_WORD(0, 68, 22)
+#define IPU_FIELD_IOX IPU_CPMEM_WORD(0, 90, 4)
+#define IPU_FIELD_RDRW IPU_CPMEM_WORD(0, 94, 1)
+#define IPU_FIELD_SO IPU_CPMEM_WORD(0, 113, 1)
+#define IPU_FIELD_SLY IPU_CPMEM_WORD(1, 102, 14)
+#define IPU_FIELD_SLUV IPU_CPMEM_WORD(1, 128, 14)
+
+#define IPU_FIELD_XV IPU_CPMEM_WORD(0, 0, 10)
+#define IPU_FIELD_YV IPU_CPMEM_WORD(0, 10, 9)
+#define IPU_FIELD_XB IPU_CPMEM_WORD(0, 19, 13)
+#define IPU_FIELD_YB IPU_CPMEM_WORD(0, 32, 12)
+#define IPU_FIELD_NSB_B IPU_CPMEM_WORD(0, 44, 1)
+#define IPU_FIELD_CF IPU_CPMEM_WORD(0, 45, 1)
+#define IPU_FIELD_SX IPU_CPMEM_WORD(0, 46, 12)
+#define IPU_FIELD_SY IPU_CPMEM_WORD(0, 58, 11)
+#define IPU_FIELD_NS IPU_CPMEM_WORD(0, 69, 10)
+#define IPU_FIELD_SDX IPU_CPMEM_WORD(0, 79, 7)
+#define IPU_FIELD_SM IPU_CPMEM_WORD(0, 86, 10)
+#define IPU_FIELD_SCC IPU_CPMEM_WORD(0, 96, 1)
+#define IPU_FIELD_SCE IPU_CPMEM_WORD(0, 97, 1)
+#define IPU_FIELD_SDY IPU_CPMEM_WORD(0, 98, 7)
+#define IPU_FIELD_SDRX IPU_CPMEM_WORD(0, 105, 1)
+#define IPU_FIELD_SDRY IPU_CPMEM_WORD(0, 106, 1)
+#define IPU_FIELD_BPP IPU_CPMEM_WORD(0, 107, 3)
+#define IPU_FIELD_DEC_SEL IPU_CPMEM_WORD(0, 110, 2)
+#define IPU_FIELD_DIM IPU_CPMEM_WORD(0, 112, 1)
+#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3)
+#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2)
+#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1)
+#define IPU_FIELD_ROT_HF_VF IPU_CPMEM_WORD(0, 119, 3)
+#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1)
+#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1)
+#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1)
+#define IPU_FIELD_CAP IPU_CPMEM_WORD(0, 123, 1)
+#define IPU_FIELD_CAE IPU_CPMEM_WORD(0, 124, 1)
+#define IPU_FIELD_FW IPU_CPMEM_WORD(0, 125, 13)
+#define IPU_FIELD_FH IPU_CPMEM_WORD(0, 138, 12)
+#define IPU_FIELD_EBA0 IPU_CPMEM_WORD(1, 0, 29)
+#define IPU_FIELD_EBA1 IPU_CPMEM_WORD(1, 29, 29)
+#define IPU_FIELD_ILO IPU_CPMEM_WORD(1, 58, 20)
+#define IPU_FIELD_NPB IPU_CPMEM_WORD(1, 78, 7)
+#define IPU_FIELD_PFS IPU_CPMEM_WORD(1, 85, 4)
+#define IPU_FIELD_ALU IPU_CPMEM_WORD(1, 89, 1)
+#define IPU_FIELD_ALBM IPU_CPMEM_WORD(1, 90, 3)
+#define IPU_FIELD_ID IPU_CPMEM_WORD(1, 93, 2)
+#define IPU_FIELD_TH IPU_CPMEM_WORD(1, 95, 7)
+#define IPU_FIELD_SL IPU_CPMEM_WORD(1, 102, 14)
+#define IPU_FIELD_WID0 IPU_CPMEM_WORD(1, 116, 3)
+#define IPU_FIELD_WID1 IPU_CPMEM_WORD(1, 119, 3)
+#define IPU_FIELD_WID2 IPU_CPMEM_WORD(1, 122, 3)
+#define IPU_FIELD_WID3 IPU_CPMEM_WORD(1, 125, 3)
+#define IPU_FIELD_OFS0 IPU_CPMEM_WORD(1, 128, 5)
+#define IPU_FIELD_OFS1 IPU_CPMEM_WORD(1, 133, 5)
+#define IPU_FIELD_OFS2 IPU_CPMEM_WORD(1, 138, 5)
+#define IPU_FIELD_OFS3 IPU_CPMEM_WORD(1, 143, 5)
+#define IPU_FIELD_SXYS IPU_CPMEM_WORD(1, 148, 1)
+#define IPU_FIELD_CRE IPU_CPMEM_WORD(1, 149, 1)
+#define IPU_FIELD_DEC_SEL2 IPU_CPMEM_WORD(1, 150, 1)
+
+static inline struct ipu_ch_param __iomem *
+ipu_get_cpmem(struct ipuv3_channel *ch)
+{
+ struct ipu_cpmem *cpmem = ch->ipu->cpmem_priv;
+
+ return cpmem->base + ch->num;
+}
+
+static void ipu_ch_param_write_field(struct ipuv3_channel *ch, u32 wbs, u32 v)
+{
+ struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
+ u32 bit = (wbs >> 8) % 160;
+ u32 size = wbs & 0xff;
+ u32 word = (wbs >> 8) / 160;
+ u32 i = bit / 32;
+ u32 ofs = bit % 32;
+ u32 mask = (1 << size) - 1;
+ u32 val;
+
+ pr_debug("%s %d %d %d\n", __func__, word, bit, size);
+
+ val = readl(&base->word[word].data[i]);
+ val &= ~(mask << ofs);
+ val |= v << ofs;
+ writel(val, &base->word[word].data[i]);
+
+ if ((bit + size - 1) / 32 > i) {
+ val = readl(&base->word[word].data[i + 1]);
+ val &= ~(mask >> (ofs ? (32 - ofs) : 0));
+ val |= v >> (ofs ? (32 - ofs) : 0);
+ writel(val, &base->word[word].data[i + 1]);
+ }
+}
+
+static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs)
+{
+ struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
+ u32 bit = (wbs >> 8) % 160;
+ u32 size = wbs & 0xff;
+ u32 word = (wbs >> 8) / 160;
+ u32 i = bit / 32;
+ u32 ofs = bit % 32;
+ u32 mask = (1 << size) - 1;
+ u32 val = 0;
+
+ pr_debug("%s %d %d %d\n", __func__, word, bit, size);
+
+ val = (readl(&base->word[word].data[i]) >> ofs) & mask;
+
+ if ((bit + size - 1) / 32 > i) {
+ u32 tmp;
+
+ tmp = readl(&base->word[word].data[i + 1]);
+ tmp &= mask >> (ofs ? (32 - ofs) : 0);
+ val |= tmp << (ofs ? (32 - ofs) : 0);
+ }
+
+ return val;
+}
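
The IPU_CPMEM_WORD() encoding above packs the word index, bit offset, and field width into a single u32, which the read/write helpers then pick apart: each CPMEM word is 160 bits, so word and offset share the top 24 bits while the width sits in the low 8. A standalone decode sketch, checked against IPU_FIELD_BPP:

#include <assert.h>
#include <stdint.h>

/* Same packing as IPU_CPMEM_WORD(word, ofs, size). */
#define CPMEM_WORD(word, ofs, size) (((((word) * 160) + (ofs)) << 8) | (size))

struct cpmem_field {
	uint32_t word;	/* which 160-bit CPMEM word */
	uint32_t bit;	/* bit offset inside that word */
	uint32_t size;	/* field width in bits */
};

static struct cpmem_field cpmem_decode(uint32_t wbs)
{
	struct cpmem_field f = {
		.word = (wbs >> 8) / 160,
		.bit  = (wbs >> 8) % 160,
		.size = wbs & 0xff,
	};
	return f;
}

int main(void)
{
	/* IPU_FIELD_BPP is CPMEM_WORD(0, 107, 3): word 0, bits 107..109 */
	struct cpmem_field f = cpmem_decode(CPMEM_WORD(0, 107, 3));

	assert(f.word == 0 && f.bit == 107 && f.size == 3);
	return 0;
}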
+
+/*
+ * The V4L2 spec defines packed RGB formats in memory byte order, which from
+ * point of view of the IPU corresponds to little-endian words with the first
+ * component in the least significant bits.
+ * The DRM pixel formats and IPU internal representation are ordered the other
+ * way around, with the first named component ordered at the most significant
+ * bits. Further, V4L2 formats are not well defined:
+ * http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
+ * We choose the interpretation which matches GStreamer behavior.
+ */
+static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_RGB565:
+ /*
+ * Here we choose the 'corrected' interpretation of RGBP, a
+ * little-endian 16-bit word with the red component at the most
+ * significant bits:
+ * g[2:0]b[4:0] r[4:0]g[5:3] <=> [16:0] R:G:B
+ */
+ return DRM_FORMAT_RGB565;
+ case V4L2_PIX_FMT_BGR24:
+ /* B G R <=> [24:0] R:G:B */
+ return DRM_FORMAT_RGB888;
+ case V4L2_PIX_FMT_RGB24:
+ /* R G B <=> [24:0] B:G:R */
+ return DRM_FORMAT_BGR888;
+ case V4L2_PIX_FMT_BGR32:
+ /* B G R A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_RGB32:
+ /* R G B A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_UYVY:
+ return DRM_FORMAT_UYVY;
+ case V4L2_PIX_FMT_YUYV:
+ return DRM_FORMAT_YUYV;
+ case V4L2_PIX_FMT_YUV420:
+ return DRM_FORMAT_YUV420;
+ case V4L2_PIX_FMT_YUV422P:
+ return DRM_FORMAT_YUV422;
+ case V4L2_PIX_FMT_YVU420:
+ return DRM_FORMAT_YVU420;
+ case V4L2_PIX_FMT_NV12:
+ return DRM_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV16:
+ return DRM_FORMAT_NV16;
+ }
+
+ return -EINVAL;
+}
+
+void ipu_cpmem_zero(struct ipuv3_channel *ch)
+{
+ struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
+ void __iomem *base = p;
+ int i;
+
+ for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
+ writel(0, base + i * sizeof(u32));
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_zero);
+
+void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_FW, xres - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_FH, yres - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_resolution);
+
+void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLY, stride - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_stride);
+
+void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch)
+{
+ struct ipu_soc *ipu = ch->ipu;
+ u32 val;
+
+ if (ipu->ipu_type == IPUV3EX)
+ ipu_ch_param_write_field(ch, IPU_FIELD_ID, 1);
+
+ val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(ch->num));
+ val |= 1 << (ch->num % 32);
+ ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(ch->num));
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
+
+void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
+{
+ if (bufnum)
+ ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
+ else
+ ipu_ch_param_write_field(ch, IPU_FIELD_EBA0, buf >> 3);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
+
+void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_ILO, stride / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLY, (stride * 2) - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
+
+void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
+{
+ id &= 0x3;
+ ipu_ch_param_write_field(ch, IPU_FIELD_ID, id);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
+
+void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_burstsize);
+
+void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_BM, 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_block_mode);
+
+void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
+ enum ipu_rotate_mode rot)
+{
+ u32 temp_rot = bitrev8(rot) >> 5;
+
+ ipu_ch_param_write_field(ch, IPU_FIELD_ROT_HF_VF, temp_rot);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_rotation);
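ipu_cpmem_set_rotation() mirrors the rotate-mode bits because enum
ipu_rotate_mode and the CPMEM ROT/HF/VF field order them oppositely. A
small illustration, under the assumption that the enum carries the
90-degree-rotate flag in bit 2 and the flip flags in bits 1..0:

    u32 rot = 0x4;                 /* 0b100: 90-degree rotation, enum order */
    u32 field = bitrev8(rot) >> 5; /* 0b001: same mode, hardware bit order */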
+
+int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
+ const struct ipu_rgb *rgb)
+{
+ int bpp = 0, npb = 0, ro, go, bo, to;
+
+ ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
+ go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
+ bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
+ to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
+
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID0, rgb->red.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS0, ro);
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID1, rgb->green.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS1, go);
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID2, rgb->blue.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS2, bo);
+
+ if (rgb->transp.length) {
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID3,
+ rgb->transp.length - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, to);
+ } else {
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
+ ipu_ch_param_write_field(ch, IPU_FIELD_OFS3,
+ rgb->bits_per_pixel);
+ }
+
+ switch (rgb->bits_per_pixel) {
+ case 32:
+ bpp = 0;
+ npb = 15;
+ break;
+ case 24:
+ bpp = 1;
+ npb = 19;
+ break;
+ case 16:
+ bpp = 3;
+ npb = 31;
+ break;
+ case 8:
+ bpp = 5;
+ npb = 63;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 7); /* rgb mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
+
+int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width)
+{
+ int bpp = 0, npb = 0;
+
+ switch (width) {
+ case 32:
+ bpp = 0;
+ npb = 15;
+ break;
+ case 24:
+ bpp = 1;
+ npb = 19;
+ break;
+ case 16:
+ bpp = 3;
+ npb = 31;
+ break;
+ case 8:
+ bpp = 5;
+ npb = 63;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 6); /* raw mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
+
+void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
+{
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_UYVY:
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
+
+void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
+ u32 pixel_format, int stride,
+ int u_offset, int v_offset)
+{
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YUV422P:
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
+
+void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
+ u32 pixel_format, int stride, int height)
+{
+ int u_offset, v_offset;
+ int uv_stride = 0;
+
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ uv_stride = stride / 2;
+ u_offset = stride * height;
+ v_offset = u_offset + (uv_stride * height / 2);
+ ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ uv_stride = stride / 2;
+ u_offset = stride * height;
+ v_offset = u_offset + (uv_stride * height);
+ ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ u_offset = stride * height;
+ ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
+ u_offset, 0);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
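For a contiguous frame, ipu_cpmem_set_yuv_planar() derives the chroma
offsets from the stride and height and delegates to the _full variant. A
worked example, recomputed from the code above for a 320x240
V4L2_PIX_FMT_YUV420 buffer with a 320-byte stride:

    int stride = 320, height = 240;
    int uv_stride = stride / 2;                       /* 160 */
    int u_offset = stride * height;                   /* 76800: U follows Y */
    int v_offset = u_offset + uv_stride * height / 2; /* 96000: V follows U */
    /* ipu_cpmem_set_yuv_planar_full() then programs SLUV = 159 and
     * UBO = 76800 / 8, VBO = 96000 / 8: the hardware keeps the plane
     * offsets in units of 8 bytes. */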
+
+static const struct ipu_rgb def_rgb_32 = {
+ .red = { .offset = 16, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
+ .transp = { .offset = 24, .length = 8, },
+ .bits_per_pixel = 32,
+};
+
+static const struct ipu_rgb def_bgr_32 = {
+ .red = { .offset = 0, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
+ .transp = { .offset = 24, .length = 8, },
+ .bits_per_pixel = 32,
+};
+
+static const struct ipu_rgb def_rgb_24 = {
+ .red = { .offset = 16, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 24,
+};
+
+static const struct ipu_rgb def_bgr_24 = {
+ .red = { .offset = 0, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 24,
+};
+
+static const struct ipu_rgb def_rgb_16 = {
+ .red = { .offset = 11, .length = 5, },
+ .green = { .offset = 5, .length = 6, },
+ .blue = { .offset = 0, .length = 5, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 16,
+};
+
+static const struct ipu_rgb def_bgr_16 = {
+ .red = { .offset = 0, .length = 5, },
+ .green = { .offset = 5, .length = 6, },
+ .blue = { .offset = 11, .length = 5, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 16,
+};
+
+#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
+#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * (y) / 4) + (x) / 2)
+#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * pix->height / 4) + \
+ (pix->width * (y) / 4) + (x) / 2)
+#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * pix->height / 2) + \
+ (pix->width * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * (y) / 2) + (x))
+#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * y) + (x))
+
+int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
+{
+ switch (drm_fourcc) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 2);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 1);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_NV12:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_NV16:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 3);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_UYVY:
+ /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_YUYV:
+ /* bits/pixel */
+ ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ ipu_cpmem_set_format_rgb(ch, &def_bgr_32);
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ ipu_cpmem_set_format_rgb(ch, &def_rgb_32);
+ break;
+ case DRM_FORMAT_BGR888:
+ ipu_cpmem_set_format_rgb(ch, &def_bgr_24);
+ break;
+ case DRM_FORMAT_RGB888:
+ ipu_cpmem_set_format_rgb(ch, &def_rgb_24);
+ break;
+ case DRM_FORMAT_RGB565:
+ ipu_cpmem_set_format_rgb(ch, &def_rgb_16);
+ break;
+ case DRM_FORMAT_BGR565:
+ ipu_cpmem_set_format_rgb(ch, &def_bgr_16);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
+
+int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
+{
+ struct v4l2_pix_format *pix = &image->pix;
+ int offset, u_offset, v_offset;
+
+ pr_debug("%s: resolution: %dx%d stride: %d\n",
+ __func__, pix->width, pix->height,
+ pix->bytesperline);
+
+ ipu_cpmem_set_resolution(ch, image->rect.width, image->rect.height);
+ ipu_cpmem_set_stride(ch, pix->bytesperline);
+
+ ipu_cpmem_set_fmt(ch, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = V2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = UV_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = 0;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_NV16:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = UV2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = 0;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
+ pix->bytesperline,
+ u_offset, v_offset);
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_RGB565:
+ offset = image->rect.left * 2 +
+ image->rect.top * pix->bytesperline;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ offset = image->rect.left * 4 +
+ image->rect.top * pix->bytesperline;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ offset = image->rect.left * 3 +
+ image->rect.top * pix->bytesperline;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset);
+ ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
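A hedged usage sketch for ipu_cpmem_set_image(): the caller fills struct
ipu_image with the V4L2 pixel format, a crop rectangle and the two buffer
addresses consumed above. The identifiers ch, buf0_dma and buf1_dma are
placeholders, not names from this patch:

    struct ipu_image image = {
            .pix = {
                    .pixelformat = V4L2_PIX_FMT_UYVY,
                    .width = 640,
                    .height = 480,
                    .bytesperline = 640 * 2,
            },
            .rect = { .left = 0, .top = 0, .width = 640, .height = 480 },
            .phys0 = buf0_dma,  /* dma_addr_t of buffer 0 */
            .phys1 = buf1_dma,  /* dma_addr_t of buffer 1 */
    };
    int ret = ipu_cpmem_set_image(ch, &image);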
+
+void ipu_cpmem_dump(struct ipuv3_channel *ch)
+{
+ struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
+ struct ipu_soc *ipu = ch->ipu;
+ int chno = ch->num;
+
+ dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", chno,
+ readl(&p->word[0].data[0]),
+ readl(&p->word[0].data[1]),
+ readl(&p->word[0].data[2]),
+ readl(&p->word[0].data[3]),
+ readl(&p->word[0].data[4]));
+ dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", chno,
+ readl(&p->word[1].data[0]),
+ readl(&p->word[1].data[1]),
+ readl(&p->word[1].data[2]),
+ readl(&p->word[1].data[3]),
+ readl(&p->word[1].data[4]));
+ dev_dbg(ipu->dev, "PFS 0x%x, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_PFS));
+ dev_dbg(ipu->dev, "BPP 0x%x, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_BPP));
+ dev_dbg(ipu->dev, "NPB 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_NPB));
+
+ dev_dbg(ipu->dev, "FW %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_FW));
+ dev_dbg(ipu->dev, "FH %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_FH));
+ dev_dbg(ipu->dev, "EBA0 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_EBA0) << 3);
+ dev_dbg(ipu->dev, "EBA1 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_EBA1) << 3);
+ dev_dbg(ipu->dev, "Stride %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_SL));
+ dev_dbg(ipu->dev, "scan_order %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_SO));
+ dev_dbg(ipu->dev, "uv_stride %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_SLUV));
+ dev_dbg(ipu->dev, "u_offset 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_UBO) << 3);
+ dev_dbg(ipu->dev, "v_offset 0x%x\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_VBO) << 3);
+
+ dev_dbg(ipu->dev, "Width0 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID0));
+ dev_dbg(ipu->dev, "Width1 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID1));
+ dev_dbg(ipu->dev, "Width2 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID2));
+ dev_dbg(ipu->dev, "Width3 %d+1, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_WID3));
+ dev_dbg(ipu->dev, "Offset0 %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS0));
+ dev_dbg(ipu->dev, "Offset1 %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS1));
+ dev_dbg(ipu->dev, "Offset2 %d, ",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS2));
+ dev_dbg(ipu->dev, "Offset3 %d\n",
+ ipu_ch_param_read_field(ch, IPU_FIELD_OFS3));
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_dump);
+
+int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
+{
+ struct ipu_cpmem *cpmem;
+
+ cpmem = devm_kzalloc(dev, sizeof(*cpmem), GFP_KERNEL);
+ if (!cpmem)
+ return -ENOMEM;
+
+ ipu->cpmem_priv = cpmem;
+
+ spin_lock_init(&cpmem->lock);
+ cpmem->base = devm_ioremap(dev, base, SZ_128K);
+ if (!cpmem->base)
+ return -ENOMEM;
+
+ dev_dbg(dev, "CPMEM base: 0x%08lx remapped to %p\n",
+ base, cpmem->base);
+ cpmem->ipu = ipu;
+
+ return 0;
+}
+
+void ipu_cpmem_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
new file mode 100644
index 000000000000..d6f56471bd2a
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright (C) 2012-2014 Mentor Graphics Inc.
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <uapi/linux/v4l2-mediabus.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+#include "ipu-prv.h"
+
+struct ipu_csi {
+ void __iomem *base;
+ int id;
+ u32 module;
+ struct clk *clk_ipu; /* IPU bus clock */
+ spinlock_t lock;
+ bool inuse;
+ struct ipu_soc *ipu;
+};
+
+/* CSI Register Offsets */
+#define CSI_SENS_CONF 0x0000
+#define CSI_SENS_FRM_SIZE 0x0004
+#define CSI_ACT_FRM_SIZE 0x0008
+#define CSI_OUT_FRM_CTRL 0x000c
+#define CSI_TST_CTRL 0x0010
+#define CSI_CCIR_CODE_1 0x0014
+#define CSI_CCIR_CODE_2 0x0018
+#define CSI_CCIR_CODE_3 0x001c
+#define CSI_MIPI_DI 0x0020
+#define CSI_SKIP 0x0024
+#define CSI_CPD_CTRL 0x0028
+#define CSI_CPD_RC(n) (0x002c + ((n)*4))
+#define CSI_CPD_RS(n) (0x004c + ((n)*4))
+#define CSI_CPD_GRC(n) (0x005c + ((n)*4))
+#define CSI_CPD_GRS(n) (0x007c + ((n)*4))
+#define CSI_CPD_GBC(n) (0x008c + ((n)*4))
+#define CSI_CPD_GBS(n) (0x00ac + ((n)*4))
+#define CSI_CPD_BC(n) (0x00bc + ((n)*4))
+#define CSI_CPD_BS(n) (0x00dc + ((n)*4))
+#define CSI_CPD_OFFSET1 0x00ec
+#define CSI_CPD_OFFSET2 0x00f0
+
+/* CSI Register Fields */
+#define CSI_SENS_CONF_DATA_FMT_SHIFT 8
+#define CSI_SENS_CONF_DATA_FMT_MASK 0x00000700
+#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444 0L
+#define CSI_SENS_CONF_DATA_FMT_YUV422_YUYV 1L
+#define CSI_SENS_CONF_DATA_FMT_YUV422_UYVY 2L
+#define CSI_SENS_CONF_DATA_FMT_BAYER 3L
+#define CSI_SENS_CONF_DATA_FMT_RGB565 4L
+#define CSI_SENS_CONF_DATA_FMT_RGB555 5L
+#define CSI_SENS_CONF_DATA_FMT_RGB444 6L
+#define CSI_SENS_CONF_DATA_FMT_JPEG 7L
+
+#define CSI_SENS_CONF_VSYNC_POL_SHIFT 0
+#define CSI_SENS_CONF_HSYNC_POL_SHIFT 1
+#define CSI_SENS_CONF_DATA_POL_SHIFT 2
+#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT 3
+#define CSI_SENS_CONF_SENS_PRTCL_MASK 0x00000070
+#define CSI_SENS_CONF_SENS_PRTCL_SHIFT 4
+#define CSI_SENS_CONF_PACK_TIGHT_SHIFT 7
+#define CSI_SENS_CONF_DATA_WIDTH_SHIFT 11
+#define CSI_SENS_CONF_EXT_VSYNC_SHIFT 15
+#define CSI_SENS_CONF_DIVRATIO_SHIFT 16
+
+#define CSI_SENS_CONF_DIVRATIO_MASK 0x00ff0000
+#define CSI_SENS_CONF_DATA_DEST_SHIFT 24
+#define CSI_SENS_CONF_DATA_DEST_MASK 0x07000000
+#define CSI_SENS_CONF_JPEG8_EN_SHIFT 27
+#define CSI_SENS_CONF_JPEG_EN_SHIFT 28
+#define CSI_SENS_CONF_FORCE_EOF_SHIFT 29
+#define CSI_SENS_CONF_DATA_EN_POL_SHIFT 31
+
+#define CSI_DATA_DEST_IC 2
+#define CSI_DATA_DEST_IDMAC 4
+
+#define CSI_CCIR_ERR_DET_EN 0x01000000
+#define CSI_HORI_DOWNSIZE_EN 0x80000000
+#define CSI_VERT_DOWNSIZE_EN 0x40000000
+#define CSI_TEST_GEN_MODE_EN 0x01000000
+
+#define CSI_HSC_MASK 0x1fff0000
+#define CSI_HSC_SHIFT 16
+#define CSI_VSC_MASK 0x00000fff
+#define CSI_VSC_SHIFT 0
+
+#define CSI_TEST_GEN_R_MASK 0x000000ff
+#define CSI_TEST_GEN_R_SHIFT 0
+#define CSI_TEST_GEN_G_MASK 0x0000ff00
+#define CSI_TEST_GEN_G_SHIFT 8
+#define CSI_TEST_GEN_B_MASK 0x00ff0000
+#define CSI_TEST_GEN_B_SHIFT 16
+
+#define CSI_MAX_RATIO_SKIP_SMFC_MASK 0x00000007
+#define CSI_MAX_RATIO_SKIP_SMFC_SHIFT 0
+#define CSI_SKIP_SMFC_MASK 0x000000f8
+#define CSI_SKIP_SMFC_SHIFT 3
+#define CSI_ID_2_SKIP_MASK 0x00000300
+#define CSI_ID_2_SKIP_SHIFT 8
+
+#define CSI_COLOR_FIRST_ROW_MASK 0x00000002
+#define CSI_COLOR_FIRST_COMP_MASK 0x00000001
+
+/* MIPI CSI-2 data types */
+#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
+#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
+#define MIPI_DT_YUV422 0x1e /* UYVY... */
+#define MIPI_DT_RGB444 0x20
+#define MIPI_DT_RGB555 0x21
+#define MIPI_DT_RGB565 0x22
+#define MIPI_DT_RGB666 0x23
+#define MIPI_DT_RGB888 0x24
+#define MIPI_DT_RAW6 0x28
+#define MIPI_DT_RAW7 0x29
+#define MIPI_DT_RAW8 0x2a
+#define MIPI_DT_RAW10 0x2b
+#define MIPI_DT_RAW12 0x2c
+#define MIPI_DT_RAW14 0x2d
+
+/*
+ * Bitfield of CSI bus signal polarities and modes.
+ */
+struct ipu_csi_bus_config {
+ unsigned data_width:4;
+ unsigned clk_mode:3;
+ unsigned ext_vsync:1;
+ unsigned vsync_pol:1;
+ unsigned hsync_pol:1;
+ unsigned pixclk_pol:1;
+ unsigned data_pol:1;
+ unsigned sens_clksrc:1;
+ unsigned pack_tight:1;
+ unsigned force_eof:1;
+ unsigned data_en_pol:1;
+
+ unsigned data_fmt;
+ unsigned mipi_dt;
+};
+
+/*
+ * Enumeration of CSI data bus widths.
+ */
+enum ipu_csi_data_width {
+ IPU_CSI_DATA_WIDTH_4 = 0,
+ IPU_CSI_DATA_WIDTH_8 = 1,
+ IPU_CSI_DATA_WIDTH_10 = 3,
+ IPU_CSI_DATA_WIDTH_12 = 5,
+ IPU_CSI_DATA_WIDTH_16 = 9,
+};
+
+/*
+ * Enumeration of CSI clock modes.
+ */
+enum ipu_csi_clk_mode {
+ IPU_CSI_CLK_MODE_GATED_CLK,
+ IPU_CSI_CLK_MODE_NONGATED_CLK,
+ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
+ IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
+};
+
+static inline u32 ipu_csi_read(struct ipu_csi *csi, unsigned offset)
+{
+ return readl(csi->base + offset);
+}
+
+static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
+ unsigned offset)
+{
+ writel(value, csi->base + offset);
+}
+
+/*
+ * Set the mclk division ratio for generating the test mode mclk.
+ * Only used by the test pattern generator.
+ */
+static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
+ u32 ipu_clk)
+{
+ u32 temp;
+ int div_ratio;
+
+ div_ratio = (ipu_clk / pixel_clk) - 1;
+
+ if (div_ratio > 0xFF || div_ratio < 0) {
+ dev_err(csi->ipu->dev,
+ "value of pixel_clk exceeds normal range\n");
+ return -EINVAL;
+ }
+
+ temp = ipu_csi_read(csi, CSI_SENS_CONF);
+ temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
+ ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
+ CSI_SENS_CONF);
+
+ return 0;
+}
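The divider arithmetic above is a plain integer ratio; with a 264 MHz IPU
clock and a requested 66 MHz test-pattern pixel clock (hypothetical
numbers), for instance:

    u32 ipu_clk = 264000000, pixel_clk = 66000000;
    u32 div_ratio = (ipu_clk / pixel_clk) - 1; /* 3: mclk = ipu_clk / 4 */
    /* div_ratio lands in CSI_SENS_CONF bits 23:16 (DIVRATIO) */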
+
+/*
+ * Find the CSI data format and data width for the given V4L2 media
+ * bus pixel format code.
+ */
+static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
+{
+ switch (mbus_code) {
+ case V4L2_MBUS_FMT_BGR565_2X8_BE:
+ case V4L2_MBUS_FMT_BGR565_2X8_LE:
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ cfg->mipi_dt = MIPI_DT_RGB565;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE:
+ case V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
+ cfg->mipi_dt = MIPI_DT_RGB444;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
+ cfg->mipi_dt = MIPI_DT_RGB555;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ break;
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGBRG8_1X8:
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case V4L2_MBUS_FMT_SRGGB8_1X8:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW8;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE:
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE:
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW10;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW10;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_10;
+ break;
+ case V4L2_MBUS_FMT_SBGGR12_1X12:
+ case V4L2_MBUS_FMT_SGBRG12_1X12:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case V4L2_MBUS_FMT_SRGGB12_1X12:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->mipi_dt = MIPI_DT_RAW12;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_12;
+ break;
+ case V4L2_MBUS_FMT_JPEG_1X8:
+ /* TODO */
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
+ cfg->mipi_dt = MIPI_DT_RAW8;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Fill a CSI bus config struct from mbus_config and mbus_framefmt.
+ */
+static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+ struct v4l2_mbus_config *mbus_cfg,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ memset(csicfg, 0, sizeof(*csicfg));
+
+ mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+
+ switch (mbus_cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ csicfg->ext_vsync = 1;
+ csicfg->vsync_pol = (mbus_cfg->flags &
+ V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0;
+ csicfg->hsync_pol = (mbus_cfg->flags &
+ V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0;
+ csicfg->pixclk_pol = (mbus_cfg->flags &
+ V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0;
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
+ break;
+ case V4L2_MBUS_BT656:
+ csicfg->ext_vsync = 0;
+ if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
+ else
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+ break;
+ case V4L2_MBUS_CSI2:
+ /*
+ * MIPI CSI-2 requires the non-gated clock mode; the other
+ * bus parameters are not applicable to MIPI CSI-2.
+ */
+ csicfg->clk_mode = IPU_CSI_CLK_MODE_NONGATED_CLK;
+ break;
+ default:
+ /* will never get here, keep compiler quiet */
+ break;
+ }
+}
+
+int ipu_csi_init_interface(struct ipu_csi *csi,
+ struct v4l2_mbus_config *mbus_cfg,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct ipu_csi_bus_config cfg;
+ unsigned long flags;
+ u32 data = 0;
+
+ fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+
+ /* Set the CSI_SENS_CONF register remaining fields */
+ data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
+ cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
+ cfg.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
+ cfg.vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
+ cfg.hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
+ cfg.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
+ cfg.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
+ cfg.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
+ cfg.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
+ cfg.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
+ cfg.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ ipu_csi_write(csi, data, CSI_SENS_CONF);
+
+ /* Setup sensor frame size */
+ ipu_csi_write(csi,
+ (mbus_fmt->width - 1) | ((mbus_fmt->height - 1) << 16),
+ CSI_SENS_FRM_SIZE);
+
+ /* Set CCIR registers */
+
+ switch (cfg.clk_mode) {
+ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
+ ipu_csi_write(csi, 0x40030, CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ break;
+ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
+ if (mbus_fmt->width == 720 && mbus_fmt->height == 576) {
+ /*
+ * PAL case
+ *
+ * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
+ * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
+ * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
+ * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
+ */
+ ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+
+ } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
+ /*
+ * NTSC case
+ *
+ * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
+ * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
+ * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
+ * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
+ */
+ ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ } else {
+ dev_err(csi->ipu->dev,
+ "Unsupported CCIR656 interlaced video mode\n");
+ spin_unlock_irqrestore(&csi->lock, flags);
+ return -EINVAL;
+ }
+ break;
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
+ ipu_csi_write(csi, 0x40030 | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ break;
+ case IPU_CSI_CLK_MODE_GATED_CLK:
+ case IPU_CSI_CLK_MODE_NONGATED_CLK:
+ ipu_csi_write(csi, 0, CSI_CCIR_CODE_1);
+ break;
+ }
+
+ dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
+ ipu_csi_read(csi, CSI_SENS_CONF));
+ dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
+ ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
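A hedged sketch of how a capture driver might call this for a BT.656 PAL
source; the geometry and field setting are assumptions chosen to hit the
PAL branch of the CCIR656 setup above:

    struct v4l2_mbus_config mbus_cfg = { .type = V4L2_MBUS_BT656 };
    struct v4l2_mbus_framefmt mbus_fmt = {
            .code = V4L2_MBUS_FMT_UYVY8_2X8,
            .width = 720,
            .height = 576,
            .field = V4L2_FIELD_INTERLACED,
    };
    int ret = ipu_csi_init_interface(csi, &mbus_cfg, &mbus_fmt);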
+
+bool ipu_csi_is_interlaced(struct ipu_csi *csi)
+{
+ unsigned long flags;
+ u32 sensor_protocol;
+
+ spin_lock_irqsave(&csi->lock, flags);
+ sensor_protocol =
+ (ipu_csi_read(csi, CSI_SENS_CONF) &
+ CSI_SENS_CONF_SENS_PRTCL_MASK) >>
+ CSI_SENS_CONF_SENS_PRTCL_SHIFT;
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ switch (sensor_protocol) {
+ case IPU_CSI_CLK_MODE_GATED_CLK:
+ case IPU_CSI_CLK_MODE_NONGATED_CLK:
+ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
+ return false;
+ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
+ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
+ return true;
+ default:
+ dev_err(csi->ipu->dev,
+ "CSI %d sensor protocol unsupported\n", csi->id);
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
+
+void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
+ w->width = (reg & 0xFFFF) + 1;
+ w->height = (reg >> 16 & 0xFFFF) + 1;
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
+ w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_get_window);
+
+void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ ipu_csi_write(csi, (w->width - 1) | ((w->height - 1) << 16),
+ CSI_ACT_FRM_SIZE);
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ reg &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
+ reg |= ((w->top << CSI_VSC_SHIFT) | (w->left << CSI_HSC_SHIFT));
+ ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_window);
+
+void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
+ u32 r_value, u32 g_value, u32 b_value,
+ u32 pix_clk)
+{
+ unsigned long flags;
+ u32 ipu_clk = clk_get_rate(csi->clk_ipu);
+ u32 temp;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ temp = ipu_csi_read(csi, CSI_TST_CTRL);
+
+ if (!active) {
+ temp &= ~CSI_TEST_GEN_MODE_EN;
+ ipu_csi_write(csi, temp, CSI_TST_CTRL);
+ } else {
+ /* Set sensb_mclk div_ratio */
+ ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
+
+ temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
+ CSI_TEST_GEN_B_MASK);
+ temp |= CSI_TEST_GEN_MODE_EN;
+ temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
+ (g_value << CSI_TEST_GEN_G_SHIFT) |
+ (b_value << CSI_TEST_GEN_B_SHIFT);
+ ipu_csi_write(csi, temp, CSI_TST_CTRL);
+ }
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
+
+int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct ipu_csi_bus_config cfg;
+ unsigned long flags;
+ u32 temp;
+
+ if (vc > 3)
+ return -EINVAL;
+
+ mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ temp = ipu_csi_read(csi, CSI_MIPI_DI);
+ temp &= ~(0xff << (vc * 8));
+ temp |= (cfg.mipi_dt << (vc * 8));
+ ipu_csi_write(csi, temp, CSI_MIPI_DI);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_mipi_datatype);
+
+int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
+ u32 max_ratio, u32 id)
+{
+ unsigned long flags;
+ u32 temp;
+
+ if (max_ratio > 5 || id > 3)
+ return -EINVAL;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ temp = ipu_csi_read(csi, CSI_SKIP);
+ temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
+ CSI_SKIP_SMFC_MASK);
+ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
+ (id << CSI_ID_2_SKIP_SHIFT) |
+ (skip << CSI_SKIP_SMFC_SHIFT);
+ ipu_csi_write(csi, temp, CSI_SKIP);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_skip_smfc);
+
+int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest)
+{
+ unsigned long flags;
+ u32 csi_sens_conf, dest;
+
+ if (csi_dest == IPU_CSI_DEST_IDMAC)
+ dest = CSI_DATA_DEST_IDMAC;
+ else
+ dest = CSI_DATA_DEST_IC; /* IC or VDIC */
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ csi_sens_conf = ipu_csi_read(csi, CSI_SENS_CONF);
+ csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
+ csi_sens_conf |= (dest << CSI_SENS_CONF_DATA_DEST_SHIFT);
+ ipu_csi_write(csi, csi_sens_conf, CSI_SENS_CONF);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_set_dest);
+
+int ipu_csi_enable(struct ipu_csi *csi)
+{
+ ipu_module_enable(csi->ipu, csi->module);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_enable);
+
+int ipu_csi_disable(struct ipu_csi *csi)
+{
+ ipu_module_disable(csi->ipu, csi->module);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_disable);
+
+struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id)
+{
+ unsigned long flags;
+ struct ipu_csi *csi, *ret;
+
+ if (id > 1)
+ return ERR_PTR(-EINVAL);
+
+ csi = ipu->csi_priv[id];
+ ret = csi;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ if (csi->inuse) {
+ ret = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ csi->inuse = true;
+unlock:
+ spin_unlock_irqrestore(&csi->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_csi_get);
+
+void ipu_csi_put(struct ipu_csi *csi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&csi->lock, flags);
+ csi->inuse = false;
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_put);
+
+int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base, u32 module, struct clk *clk_ipu)
+{
+ struct ipu_csi *csi;
+
+ if (id > 1)
+ return -ENODEV;
+
+ csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ ipu->csi_priv[id] = csi;
+
+ spin_lock_init(&csi->lock);
+ csi->module = module;
+ csi->id = id;
+ csi->clk_ipu = clk_ipu;
+ csi->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!csi->base)
+ return -ENOMEM;
+
+ dev_dbg(dev, "CSI%d base: 0x%08lx remapped to %p\n",
+ id, base, csi->base);
+ csi->ipu = ipu;
+
+ return 0;
+}
+
+void ipu_csi_exit(struct ipu_soc *ipu, int id)
+{
+}
+
+void ipu_csi_dump(struct ipu_csi *csi)
+{
+ dev_dbg(csi->ipu->dev, "CSI_SENS_CONF: %08x\n",
+ ipu_csi_read(csi, CSI_SENS_CONF));
+ dev_dbg(csi->ipu->dev, "CSI_SENS_FRM_SIZE: %08x\n",
+ ipu_csi_read(csi, CSI_SENS_FRM_SIZE));
+ dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE: %08x\n",
+ ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+ dev_dbg(csi->ipu->dev, "CSI_OUT_FRM_CTRL: %08x\n",
+ ipu_csi_read(csi, CSI_OUT_FRM_CTRL));
+ dev_dbg(csi->ipu->dev, "CSI_TST_CTRL: %08x\n",
+ ipu_csi_read(csi, CSI_TST_CTRL));
+ dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_1: %08x\n",
+ ipu_csi_read(csi, CSI_CCIR_CODE_1));
+ dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_2: %08x\n",
+ ipu_csi_read(csi, CSI_CCIR_CODE_2));
+ dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_3: %08x\n",
+ ipu_csi_read(csi, CSI_CCIR_CODE_3));
+ dev_dbg(csi->ipu->dev, "CSI_MIPI_DI: %08x\n",
+ ipu_csi_read(csi, CSI_MIPI_DI));
+ dev_dbg(csi->ipu->dev, "CSI_SKIP: %08x\n",
+ ipu_csi_read(csi, CSI_SKIP));
+}
+EXPORT_SYMBOL_GPL(ipu_csi_dump);
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
new file mode 100644
index 000000000000..ad75588e1629
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright (C) 2012-2014 Mentor Graphics Inc.
+ * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "ipu-prv.h"
+
+/* IC Register Offsets */
+#define IC_CONF 0x0000
+#define IC_PRP_ENC_RSC 0x0004
+#define IC_PRP_VF_RSC 0x0008
+#define IC_PP_RSC 0x000C
+#define IC_CMBP_1 0x0010
+#define IC_CMBP_2 0x0014
+#define IC_IDMAC_1 0x0018
+#define IC_IDMAC_2 0x001C
+#define IC_IDMAC_3 0x0020
+#define IC_IDMAC_4 0x0024
+
+/* IC Register Fields */
+#define IC_CONF_PRPENC_EN (1 << 0)
+#define IC_CONF_PRPENC_CSC1 (1 << 1)
+#define IC_CONF_PRPENC_ROT_EN (1 << 2)
+#define IC_CONF_PRPVF_EN (1 << 8)
+#define IC_CONF_PRPVF_CSC1 (1 << 9)
+#define IC_CONF_PRPVF_CSC2 (1 << 10)
+#define IC_CONF_PRPVF_CMB (1 << 11)
+#define IC_CONF_PRPVF_ROT_EN (1 << 12)
+#define IC_CONF_PP_EN (1 << 16)
+#define IC_CONF_PP_CSC1 (1 << 17)
+#define IC_CONF_PP_CSC2 (1 << 18)
+#define IC_CONF_PP_CMB (1 << 19)
+#define IC_CONF_PP_ROT_EN (1 << 20)
+#define IC_CONF_IC_GLB_LOC_A (1 << 28)
+#define IC_CONF_KEY_COLOR_EN (1 << 29)
+#define IC_CONF_RWS_EN (1 << 30)
+#define IC_CONF_CSI_MEM_WR_EN (1 << 31)
+
+#define IC_IDMAC_1_CB0_BURST_16 (1 << 0)
+#define IC_IDMAC_1_CB1_BURST_16 (1 << 1)
+#define IC_IDMAC_1_CB2_BURST_16 (1 << 2)
+#define IC_IDMAC_1_CB3_BURST_16 (1 << 3)
+#define IC_IDMAC_1_CB4_BURST_16 (1 << 4)
+#define IC_IDMAC_1_CB5_BURST_16 (1 << 5)
+#define IC_IDMAC_1_CB6_BURST_16 (1 << 6)
+#define IC_IDMAC_1_CB7_BURST_16 (1 << 7)
+#define IC_IDMAC_1_PRPENC_ROT_MASK (0x7 << 11)
+#define IC_IDMAC_1_PRPENC_ROT_OFFSET 11
+#define IC_IDMAC_1_PRPVF_ROT_MASK (0x7 << 14)
+#define IC_IDMAC_1_PRPVF_ROT_OFFSET 14
+#define IC_IDMAC_1_PP_ROT_MASK (0x7 << 17)
+#define IC_IDMAC_1_PP_ROT_OFFSET 17
+#define IC_IDMAC_1_PP_FLIP_RS (1 << 22)
+#define IC_IDMAC_1_PRPVF_FLIP_RS (1 << 21)
+#define IC_IDMAC_1_PRPENC_FLIP_RS (1 << 20)
+
+#define IC_IDMAC_2_PRPENC_HEIGHT_MASK (0x3ff << 0)
+#define IC_IDMAC_2_PRPENC_HEIGHT_OFFSET 0
+#define IC_IDMAC_2_PRPVF_HEIGHT_MASK (0x3ff << 10)
+#define IC_IDMAC_2_PRPVF_HEIGHT_OFFSET 10
+#define IC_IDMAC_2_PP_HEIGHT_MASK (0x3ff << 20)
+#define IC_IDMAC_2_PP_HEIGHT_OFFSET 20
+
+#define IC_IDMAC_3_PRPENC_WIDTH_MASK (0x3ff << 0)
+#define IC_IDMAC_3_PRPENC_WIDTH_OFFSET 0
+#define IC_IDMAC_3_PRPVF_WIDTH_MASK (0x3ff << 10)
+#define IC_IDMAC_3_PRPVF_WIDTH_OFFSET 10
+#define IC_IDMAC_3_PP_WIDTH_MASK (0x3ff << 20)
+#define IC_IDMAC_3_PP_WIDTH_OFFSET 20
+
+struct ic_task_regoffs {
+ u32 rsc;
+ u32 tpmem_csc[2];
+};
+
+struct ic_task_bitfields {
+ u32 ic_conf_en;
+ u32 ic_conf_rot_en;
+ u32 ic_conf_cmb_en;
+ u32 ic_conf_csc1_en;
+ u32 ic_conf_csc2_en;
+ u32 ic_cmb_galpha_bit;
+};
+
+static const struct ic_task_regoffs ic_task_reg[IC_NUM_TASKS] = {
+ [IC_TASK_ENCODER] = {
+ .rsc = IC_PRP_ENC_RSC,
+ .tpmem_csc = {0x2008, 0},
+ },
+ [IC_TASK_VIEWFINDER] = {
+ .rsc = IC_PRP_VF_RSC,
+ .tpmem_csc = {0x4028, 0x4040},
+ },
+ [IC_TASK_POST_PROCESSOR] = {
+ .rsc = IC_PP_RSC,
+ .tpmem_csc = {0x6060, 0x6078},
+ },
+};
+
+static const struct ic_task_bitfields ic_task_bit[IC_NUM_TASKS] = {
+ [IC_TASK_ENCODER] = {
+ .ic_conf_en = IC_CONF_PRPENC_EN,
+ .ic_conf_rot_en = IC_CONF_PRPENC_ROT_EN,
+ .ic_conf_cmb_en = 0, /* NA */
+ .ic_conf_csc1_en = IC_CONF_PRPENC_CSC1,
+ .ic_conf_csc2_en = 0, /* NA */
+ .ic_cmb_galpha_bit = 0, /* NA */
+ },
+ [IC_TASK_VIEWFINDER] = {
+ .ic_conf_en = IC_CONF_PRPVF_EN,
+ .ic_conf_rot_en = IC_CONF_PRPVF_ROT_EN,
+ .ic_conf_cmb_en = IC_CONF_PRPVF_CMB,
+ .ic_conf_csc1_en = IC_CONF_PRPVF_CSC1,
+ .ic_conf_csc2_en = IC_CONF_PRPVF_CSC2,
+ .ic_cmb_galpha_bit = 0,
+ },
+ [IC_TASK_POST_PROCESSOR] = {
+ .ic_conf_en = IC_CONF_PP_EN,
+ .ic_conf_rot_en = IC_CONF_PP_ROT_EN,
+ .ic_conf_cmb_en = IC_CONF_PP_CMB,
+ .ic_conf_csc1_en = IC_CONF_PP_CSC1,
+ .ic_conf_csc2_en = IC_CONF_PP_CSC2,
+ .ic_cmb_galpha_bit = 8,
+ },
+};
+
+struct ipu_ic_priv;
+
+struct ipu_ic {
+ enum ipu_ic_task task;
+ const struct ic_task_regoffs *reg;
+ const struct ic_task_bitfields *bit;
+
+ enum ipu_color_space in_cs, g_in_cs;
+ enum ipu_color_space out_cs;
+ bool graphics;
+ bool rotation;
+ bool in_use;
+
+ struct ipu_ic_priv *priv;
+};
+
+struct ipu_ic_priv {
+ void __iomem *base;
+ void __iomem *tpmem_base;
+ spinlock_t lock;
+ struct ipu_soc *ipu;
+ int use_count;
+ struct ipu_ic task[IC_NUM_TASKS];
+};
+
+static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset)
+{
+ return readl(ic->priv->base + offset);
+}
+
+static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset)
+{
+ writel(value, ic->priv->base + offset);
+}
+
+struct ic_csc_params {
+ s16 coeff[3][3]; /* signed 9-bit integer coefficients */
+ s16 offset[3]; /* signed 11+2-bit fixed point offset */
+ u8 scale:2; /* scale coefficients * 2^(scale-1) */
+ bool sat:1; /* saturate to (16, 235(Y) / 240(U, V)) */
+};
+
+/*
+ * Y = R * .299 + G * .587 + B * .114;
+ * U = R * -.169 + G * -.332 + B * .500 + 128.;
+ * V = R * .500 + G * -.419 + B * -.0813 + 128.;
+ */
+static const struct ic_csc_params ic_csc_rgb2ycbcr = {
+ .coeff = {
+ { 77, 150, 29 },
+ { 469, 427, 128 },
+ { 128, 405, 491 },
+ },
+ .offset = { 0, 512, 512 },
+ .scale = 1,
+};
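The coefficients are 9-bit two's-complement values with 8 fractional bits,
further scaled by 2^(scale-1); the offsets, per the 11+2-bit comment in
struct ic_csc_params, carry 2 fractional bits. Recovering the real-valued
matrix from the table above as a sanity check:

    /* scale = 1 -> coefficients are multiplied by 2^(1-1) = 1 */
    double y_r = 77 / 256.0;          /*  0.301, the .299 Y<-R term  */
    double u_r = (469 - 512) / 256.0; /* -0.168, the -.169 U<-R term */
    double v_b = (491 - 512) / 256.0; /* -0.082, the V<-B term       */
    int c_off = 512 / 4;              /*  128, the chroma offset     */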
+
+/* transparent RGB->RGB matrix for graphics combining */
+static const struct ic_csc_params ic_csc_rgb2rgb = {
+ .coeff = {
+ { 128, 0, 0 },
+ { 0, 128, 0 },
+ { 0, 0, 128 },
+ },
+ .scale = 2,
+};
+
+/*
+ * R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
+ * G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
+ * B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128));
+ */
+static const struct ic_csc_params ic_csc_ycbcr2rgb = {
+ .coeff = {
+ { 149, 0, 204 },
+ { 149, 462, 408 },
+ { 149, 255, 0 },
+ },
+ .offset = { -446, 266, -554 },
+ .scale = 2,
+};
+
+static int init_csc(struct ipu_ic *ic,
+ enum ipu_color_space inf,
+ enum ipu_color_space outf,
+ int csc_index)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ const struct ic_csc_params *params;
+ u32 __iomem *base;
+ const u16 (*c)[3];
+ const u16 *a;
+ u32 param;
+
+ base = (u32 __iomem *)
+ (priv->tpmem_base + ic->reg->tpmem_csc[csc_index]);
+
+ if (inf == IPUV3_COLORSPACE_YUV && outf == IPUV3_COLORSPACE_RGB)
+ params = &ic_csc_ycbcr2rgb;
+ else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_YUV)
+ params = &ic_csc_rgb2ycbcr;
+ else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_RGB)
+ params = &ic_csc_rgb2rgb;
+ else {
+ dev_err(priv->ipu->dev, "Unsupported color space conversion\n");
+ return -EINVAL;
+ }
+
+ /* Cast to unsigned */
+ c = (const u16 (*)[3])params->coeff;
+ a = (const u16 *)params->offset;
+
+ param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) |
+ ((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff);
+ writel(param, base++);
+
+ param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
+ (params->sat << 9);
+ writel(param, base++);
+
+ param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
+ ((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff);
+ writel(param, base++);
+
+ param = ((a[1] & 0x1fe0) >> 5);
+ writel(param, base++);
+
+ param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) |
+ ((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff);
+ writel(param, base++);
+
+ param = ((a[2] & 0x1fe0) >> 5);
+ writel(param, base++);
+
+ return 0;
+}
+
+static int calc_resize_coeffs(struct ipu_ic *ic,
+ u32 in_size, u32 out_size,
+ u32 *resize_coeff,
+ u32 *downsize_coeff)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ struct ipu_soc *ipu = priv->ipu;
+ u32 temp_size, temp_downsize;
+
+ /*
+ * Input size cannot be more than 4096, and output size cannot
+ * be more than 1024
+ */
+ if (in_size > 4096) {
+ dev_err(ipu->dev, "Unsupported resize (in_size > 4096)\n");
+ return -EINVAL;
+ }
+ if (out_size > 1024) {
+ dev_err(ipu->dev, "Unsupported resize (out_size > 1024)\n");
+ return -EINVAL;
+ }
+
+ /* Cannot downsize more than 8:1 */
+ if ((out_size << 3) < in_size) {
+ dev_err(ipu->dev, "Unsupported downsize\n");
+ return -EINVAL;
+ }
+
+ /* Compute downsizing coefficient */
+ temp_downsize = 0;
+ temp_size = in_size;
+ while (((temp_size > 1024) || (temp_size >= out_size * 2)) &&
+ (temp_downsize < 2)) {
+ temp_size >>= 1;
+ temp_downsize++;
+ }
+ *downsize_coeff = temp_downsize;
+
+ /*
+ * compute resizing coefficient using the following equation:
+ * resize_coeff = M * (SI - 1) / (SO - 1)
+ * where M = 2^13, SI = input size, SO = output size
+ */
+ *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
+ if (*resize_coeff >= 16384L) {
+ dev_err(ipu->dev, "Warning! Overflow on resize coeff.\n");
+ *resize_coeff = 0x3FFF;
+ }
+
+ return 0;
+}
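A worked example of the two-stage scaling for a 1920-pixel line resized to
640, recomputed from the code above:

    /* downsizer: 1920 > 1024, so halve once -> temp_size = 960 and
     * downsize_coeff = 1; 960 < 2 * 640, so no second halving */
    u32 resize_coeff = (8192 * (960 - 1)) / (640 - 1); /* 12294 */
    /* the resizer then scales 960 -> 640; 12294 / 8192 ~ 1.5 is the
     * input step per output pixel */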
+
+void ipu_ic_task_enable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 ic_conf;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_conf = ipu_ic_read(ic, IC_CONF);
+
+ ic_conf |= ic->bit->ic_conf_en;
+
+ if (ic->rotation)
+ ic_conf |= ic->bit->ic_conf_rot_en;
+
+ if (ic->in_cs != ic->out_cs)
+ ic_conf |= ic->bit->ic_conf_csc1_en;
+
+ if (ic->graphics) {
+ ic_conf |= ic->bit->ic_conf_cmb_en;
+ ic_conf |= ic->bit->ic_conf_csc1_en;
+
+ if (ic->g_in_cs != ic->out_cs)
+ ic_conf |= ic->bit->ic_conf_csc2_en;
+ }
+
+ ipu_ic_write(ic, ic_conf, IC_CONF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_enable);
+
+void ipu_ic_task_disable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 ic_conf;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_conf = ipu_ic_read(ic, IC_CONF);
+
+ ic_conf &= ~(ic->bit->ic_conf_en |
+ ic->bit->ic_conf_csc1_en |
+ ic->bit->ic_conf_rot_en);
+ if (ic->bit->ic_conf_csc2_en)
+ ic_conf &= ~ic->bit->ic_conf_csc2_en;
+ if (ic->bit->ic_conf_cmb_en)
+ ic_conf &= ~ic->bit->ic_conf_cmb_en;
+
+ ipu_ic_write(ic, ic_conf, IC_CONF);
+
+ ic->rotation = ic->graphics = false;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
+
+int ipu_ic_task_graphics_init(struct ipu_ic *ic,
+ enum ipu_color_space in_g_cs,
+ bool galpha_en, u32 galpha,
+ bool colorkey_en, u32 colorkey)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 reg, ic_conf;
+ int ret = 0;
+
+ if (ic->task == IC_TASK_ENCODER)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_conf = ipu_ic_read(ic, IC_CONF);
+
+ if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
+ /* need transparent CSC1 conversion */
+ ret = init_csc(ic, IPUV3_COLORSPACE_RGB,
+ IPUV3_COLORSPACE_RGB, 0);
+ if (ret)
+ goto unlock;
+ }
+
+ ic->g_in_cs = in_g_cs;
+
+ if (ic->g_in_cs != ic->out_cs) {
+ ret = init_csc(ic, ic->g_in_cs, ic->out_cs, 1);
+ if (ret)
+ goto unlock;
+ }
+
+ if (galpha_en) {
+ ic_conf |= IC_CONF_IC_GLB_LOC_A;
+ reg = ipu_ic_read(ic, IC_CMBP_1);
+ reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
+ reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
+ ipu_ic_write(ic, reg, IC_CMBP_1);
+ } else {
+ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
+ }
+
+ if (colorkey_en) {
+ ic_conf |= IC_CONF_KEY_COLOR_EN;
+ ipu_ic_write(ic, colorkey, IC_CMBP_2);
+ } else {
+ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
+ }
+
+ ipu_ic_write(ic, ic_conf, IC_CONF);
+
+ ic->graphics = true;
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
+
+int ipu_ic_task_init(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ u32 reg, downsize_coeff, resize_coeff;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Setup vertical resizing */
+ ret = calc_resize_coeffs(ic, in_height, out_height,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
+
+ reg = (downsize_coeff << 30) | (resize_coeff << 16);
+
+ /* Setup horizontal resizing */
+ ret = calc_resize_coeffs(ic, in_width, out_width,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
+
+ reg |= (downsize_coeff << 14) | resize_coeff;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ipu_ic_write(ic, reg, ic->reg->rsc);
+
+ /* Setup color space conversion */
+ ic->in_cs = in_cs;
+ ic->out_cs = out_cs;
+
+ if (ic->in_cs != ic->out_cs) {
+ ret = init_csc(ic, ic->in_cs, ic->out_cs, 0);
+ if (ret)
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_init);
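A hedged end-to-end sketch of driving the post-processing task with the
API above; channel setup and error handling are elided, and ipu and ch are
placeholder names:

    struct ipu_ic *ic = ipu_ic_get(ipu, IC_TASK_POST_PROCESSOR);

    /* scale 640x480 YUV down to 320x240 RGB */
    ipu_ic_task_init(ic, 640, 480, 320, 240,
                     IPUV3_COLORSPACE_YUV, IPUV3_COLORSPACE_RGB);
    ipu_ic_task_idma_init(ic, ch, 320, 240, 16, IPU_ROTATE_NONE);
    ipu_ic_enable(ic);
    ipu_ic_task_enable(ic);
    /* ... run the channel ... */
    ipu_ic_task_disable(ic);
    ipu_ic_disable(ic);
    ipu_ic_put(ic);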
+
+int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
+ u32 width, u32 height, int burst_size,
+ enum ipu_rotate_mode rot)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ struct ipu_soc *ipu = priv->ipu;
+ u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
+ u32 temp_rot = bitrev8(rot) >> 5;
+ bool need_hor_flip = false;
+ unsigned long flags;
+ int ret = 0;
+
+ if ((burst_size != 8) && (burst_size != 16)) {
+ dev_err(ipu->dev, "Illegal burst length for IC\n");
+ return -EINVAL;
+ }
+
+ width--;
+ height--;
+
+ if (temp_rot & 0x2) /* Need horizontal flip */
+ need_hor_flip = true;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1);
+ ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2);
+ ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3);
+
+ switch (channel->num) {
+ case IPUV3_CHANNEL_IC_PP_MEM:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_IC_PP:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
+ break;
+ case IPUV3_CHANNEL_MEM_ROT_PP:
+ ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_IC_PRP_VF:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
+ break;
+ case IPUV3_CHANNEL_IC_PRP_ENC_MEM:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_ROT_ENC:
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
+ break;
+ case IPUV3_CHANNEL_IC_PRP_VF_MEM:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
+
+ if (need_hor_flip)
+ ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
+
+ ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
+ ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
+
+ ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
+ ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
+ break;
+ case IPUV3_CHANNEL_MEM_ROT_VF:
+ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
+ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
+ break;
+ case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
+ break;
+ case IPUV3_CHANNEL_G_MEM_IC_PP:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
+ break;
+ case IPUV3_CHANNEL_VDI_MEM_IC_VF:
+ if (burst_size == 16)
+ ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
+ else
+ ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
+ break;
+ default:
+ goto unlock;
+ }
+
+ ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1);
+ ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
+ ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
+
+ if (rot >= IPU_ROTATE_90_RIGHT)
+ ic->rotation = true;
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
+
+int ipu_ic_enable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 module = IPU_CONF_IC_EN;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ic->rotation)
+ module |= IPU_CONF_ROT_EN;
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, module);
+
+ priv->use_count++;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_enable);
+
+int ipu_ic_disable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+ u32 module = IPU_CONF_IC_EN | IPU_CONF_ROT_EN;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->use_count--;
+
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, module);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_disable);
+
+struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task)
+{
+ struct ipu_ic_priv *priv = ipu->ic_priv;
+ unsigned long flags;
+ struct ipu_ic *ic, *ret;
+
+ if (task >= IC_NUM_TASKS)
+ return ERR_PTR(-EINVAL);
+
+ ic = &priv->task[task];
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ic->in_use) {
+ ret = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ ic->in_use = true;
+ ret = ic;
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_ic_get);
+
+void ipu_ic_put(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ic->in_use = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_ic_put);
+
+int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, unsigned long tpmem_base)
+{
+ struct ipu_ic_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipu->ic_priv = priv;
+
+ spin_lock_init(&priv->lock);
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
+ return -ENOMEM;
+ priv->tpmem_base = devm_ioremap(dev, tpmem_base, SZ_64K);
+ if (!priv->tpmem_base)
+ return -ENOMEM;
+
+ dev_dbg(dev, "IC base: 0x%08lx remapped to %p\n", base, priv->base);
+
+ priv->ipu = ipu;
+
+ for (i = 0; i < IC_NUM_TASKS; i++) {
+ priv->task[i].task = i;
+ priv->task[i].priv = priv;
+ priv->task[i].reg = &ic_task_reg[i];
+ priv->task[i].bit = &ic_task_bit[i];
+ }
+
+ return 0;
+}
+
+void ipu_ic_exit(struct ipu_soc *ipu)
+{
+}
+
+void ipu_ic_dump(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+ struct ipu_soc *ipu = priv->ipu;
+
+ dev_dbg(ipu->dev, "IC_CONF = \t0x%08X\n",
+ ipu_ic_read(ic, IC_CONF));
+ dev_dbg(ipu->dev, "IC_PRP_ENC_RSC = \t0x%08X\n",
+ ipu_ic_read(ic, IC_PRP_ENC_RSC));
+ dev_dbg(ipu->dev, "IC_PRP_VF_RSC = \t0x%08X\n",
+ ipu_ic_read(ic, IC_PRP_VF_RSC));
+ dev_dbg(ipu->dev, "IC_PP_RSC = \t0x%08X\n",
+ ipu_ic_read(ic, IC_PP_RSC));
+ dev_dbg(ipu->dev, "IC_CMBP_1 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_CMBP_1));
+ dev_dbg(ipu->dev, "IC_CMBP_2 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_CMBP_2));
+ dev_dbg(ipu->dev, "IC_IDMAC_1 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_1));
+ dev_dbg(ipu->dev, "IC_IDMAC_2 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_2));
+ dev_dbg(ipu->dev, "IC_IDMAC_3 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_3));
+ dev_dbg(ipu->dev, "IC_IDMAC_4 = \t0x%08X\n",
+ ipu_ic_read(ic, IC_IDMAC_4));
+}
+EXPORT_SYMBOL_GPL(ipu_ic_dump);
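
Putting the file's new API together, an end-to-end sketch (a hypothetical driver fragment; the task choice, geometry, and rotation mode are assumptions, and CPMEM/channel setup is elided):

static int example_vf_run(struct ipu_soc *ipu, struct ipuv3_channel *ch)
{
	struct ipu_ic *ic;
	int ret;

	ic = ipu_ic_get(ipu, IC_TASK_VIEWFINDER);
	if (IS_ERR(ic))
		return PTR_ERR(ic);

	ret = ipu_ic_task_idma_init(ic, ch, 640, 480, 16, IPU_ROTATE_NONE);
	if (ret)
		goto out_put;

	ret = ipu_ic_enable(ic);
	if (ret)
		goto out_put;

	/* ... queue buffers and wait for completion ... */

	ipu_ic_disable(ic);
out_put:
	ipu_ic_put(ic);
	return ret;
}
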
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index c93f50ec04f7..bfb1e8a4483f 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -24,23 +24,6 @@ struct ipu_soc;
#include <video/imx-ipu-v3.h>
-#define IPUV3_CHANNEL_CSI0 0
-#define IPUV3_CHANNEL_CSI1 1
-#define IPUV3_CHANNEL_CSI2 2
-#define IPUV3_CHANNEL_CSI3 3
-#define IPUV3_CHANNEL_MEM_BG_SYNC 23
-#define IPUV3_CHANNEL_MEM_FG_SYNC 27
-#define IPUV3_CHANNEL_MEM_DC_SYNC 28
-#define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA 31
-#define IPUV3_CHANNEL_MEM_DC_ASYNC 41
-#define IPUV3_CHANNEL_ROT_ENC_MEM 45
-#define IPUV3_CHANNEL_ROT_VF_MEM 46
-#define IPUV3_CHANNEL_ROT_PP_MEM 47
-#define IPUV3_CHANNEL_ROT_ENC_MEM_OUT 48
-#define IPUV3_CHANNEL_ROT_VF_MEM_OUT 49
-#define IPUV3_CHANNEL_ROT_PP_MEM_OUT 50
-#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA 51
-
#define IPU_MCU_T_DEFAULT 8
#define IPU_CM_IDMAC_REG_OFS 0x00008000
#define IPU_CM_IC_REG_OFS 0x00020000
@@ -85,6 +68,7 @@ struct ipu_soc;
#define IPU_DISP_TASK_STAT IPU_CM_REG(0x0254)
#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0268 + 4 * ((ch) / 32))
#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0270 + 4 * ((ch) / 32))
+#define IPU_CHA_BUF2_RDY(ch) IPU_CM_REG(0x0288 + 4 * ((ch) / 32))
#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0278 + 4 * ((ch) / 32))
#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0280 + 4 * ((ch) / 32))
@@ -148,9 +132,12 @@ struct ipuv3_channel {
struct ipu_soc *ipu;
};
+struct ipu_cpmem;
+struct ipu_csi;
struct ipu_dc_priv;
struct ipu_dmfc_priv;
struct ipu_di;
+struct ipu_ic_priv;
struct ipu_smfc_priv;
struct ipu_devtype;
@@ -164,7 +151,6 @@ struct ipu_soc {
void __iomem *cm_reg;
void __iomem *idmac_reg;
- struct ipu_ch_param __iomem *cpmem_base;
int usecount;
@@ -176,13 +162,27 @@ struct ipu_soc {
int irq_err;
struct irq_domain *domain;
+ struct ipu_cpmem *cpmem_priv;
struct ipu_dc_priv *dc_priv;
struct ipu_dp_priv *dp_priv;
struct ipu_dmfc_priv *dmfc_priv;
struct ipu_di *di_priv[2];
+ struct ipu_csi *csi_priv[2];
+ struct ipu_ic_priv *ic_priv;
struct ipu_smfc_priv *smfc_priv;
};
+static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->idmac_reg + offset);
+}
+
+static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
+ unsigned offset)
+{
+ writel(value, ipu->idmac_reg + offset);
+}
+
void ipu_srm_dp_sync_update(struct ipu_soc *ipu);
int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
@@ -191,6 +191,14 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
+int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base, u32 module, struct clk *clk_ipu);
+void ipu_csi_exit(struct ipu_soc *ipu, int id);
+
+int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, unsigned long tpmem_base);
+void ipu_ic_exit(struct ipu_soc *ipu);
+
int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
unsigned long base, u32 module, struct clk *ipu_clk);
void ipu_di_exit(struct ipu_soc *ipu, int id);
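
The new ipu_idmac_read()/ipu_idmac_write() inlines added above give the sub-drivers a typed accessor for the IDMAC register window instead of open-coded readl()/writel() on ipu->idmac_reg. An illustrative read-modify-write (the offset and bit are placeholders, not real IDMAC register names):

	/* Placeholder offsets: set bit 'bit' in an IDMAC register. */
	u32 val = ipu_idmac_read(ipu, reg_ofs);
	ipu_idmac_write(ipu, val | BIT(bit), reg_ofs);
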
diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c
index e4f85ad286fc..6ca9b43ce25a 100644
--- a/drivers/gpu/ipu-v3/ipu-smfc.c
+++ b/drivers/gpu/ipu-v3/ipu-smfc.c
@@ -21,9 +21,18 @@
#include "ipu-prv.h"
+struct ipu_smfc {
+ struct ipu_smfc_priv *priv;
+ int chno;
+ bool inuse;
+};
+
struct ipu_smfc_priv {
void __iomem *base;
spinlock_t lock;
+ struct ipu_soc *ipu;
+ struct ipu_smfc channel[4];
+ int use_count;
};
/* SMFC Registers */
@@ -31,63 +40,166 @@ struct ipu_smfc_priv {
#define SMFC_WMC 0x0004
#define SMFC_BS 0x0008
-int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize)
+int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize)
{
- struct ipu_smfc_priv *smfc = ipu->smfc_priv;
+ struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
u32 val, shift;
- spin_lock_irqsave(&smfc->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
- shift = channel * 4;
- val = readl(smfc->base + SMFC_BS);
+ shift = smfc->chno * 4;
+ val = readl(priv->base + SMFC_BS);
val &= ~(0xf << shift);
val |= burstsize << shift;
- writel(val, smfc->base + SMFC_BS);
+ writel(val, priv->base + SMFC_BS);
- spin_unlock_irqrestore(&smfc->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
-int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id)
+int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id)
{
- struct ipu_smfc_priv *smfc = ipu->smfc_priv;
+ struct ipu_smfc_priv *priv = smfc->priv;
unsigned long flags;
u32 val, shift;
- spin_lock_irqsave(&smfc->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
- shift = channel * 3;
- val = readl(smfc->base + SMFC_MAP);
+ shift = smfc->chno * 3;
+ val = readl(priv->base + SMFC_MAP);
val &= ~(0x7 << shift);
val |= ((csi_id << 2) | mipi_id) << shift;
- writel(val, smfc->base + SMFC_MAP);
+ writel(val, priv->base + SMFC_MAP);
- spin_unlock_irqrestore(&smfc->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
+int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ shift = smfc->chno * 6 + (smfc->chno > 1 ? 4 : 0);
+ val = readl(priv->base + SMFC_WMC);
+ val &= ~(0x3f << shift);
+ val |= ((clr_level << 3) | set_level) << shift;
+ writel(val, priv->base + SMFC_WMC);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_set_watermark);
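
The shift expression in ipu_smfc_set_watermark() packs one 6-bit field per channel into SMFC_WMC, with channels 2 and 3 bumped past a 4-bit gap: channel 0 lands at bits 0-5, channel 1 at 6-11, channel 2 at 16-21, and channel 3 at 22-27, each field holding the 3-bit set level in its low half and the 3-bit clear level above it. Worked example (the levels are hypothetical):

	/* Channel 2, set level 2, clear level 5:
	 *   shift = 2 * 6 + 4    = 16
	 *   field = (5 << 3) | 2 = 0x2a
	 * so bits 16..21 of SMFC_WMC become 0b101010. */
	ipu_smfc_set_watermark(smfc, 2, 5);
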
+
+int ipu_smfc_enable(struct ipu_smfc *smfc)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_SMFC_EN);
+
+ priv->use_count++;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_enable);
+
+int ipu_smfc_disable(struct ipu_smfc *smfc)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->use_count--;
+
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, IPU_CONF_SMFC_EN);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_disable);
+
+struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno)
+{
+ struct ipu_smfc_priv *priv = ipu->smfc_priv;
+ struct ipu_smfc *smfc, *ret;
+ unsigned long flags;
+
+ if (chno >= 4)
+ return ERR_PTR(-EINVAL);
+
+ smfc = &priv->channel[chno];
+ ret = smfc;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (smfc->inuse) {
+ ret = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ smfc->inuse = true;
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_get);
+
+void ipu_smfc_put(struct ipu_smfc *smfc)
+{
+ struct ipu_smfc_priv *priv = smfc->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ smfc->inuse = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_put);
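
With the SMFC API now handle-based rather than keyed on (ipu, channel-number), a typical capture-path fragment might read as follows (hypothetical values; the CSI id, MIPI virtual channel, burst size, and watermark levels are illustrative):

	struct ipu_smfc *smfc;

	smfc = ipu_smfc_get(ipu, 0);		/* claim SMFC channel 0 */
	if (IS_ERR(smfc))
		return PTR_ERR(smfc);

	ipu_smfc_map_channel(smfc, 0, 0);	/* route CSI0, MIPI vc 0 */
	ipu_smfc_set_burstsize(smfc, 8);
	ipu_smfc_set_watermark(smfc, 4, 2);
	ipu_smfc_enable(smfc);

	/* ... stream frames ... */

	ipu_smfc_disable(smfc);
	ipu_smfc_put(smfc);
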
+
int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
unsigned long base)
{
- struct ipu_smfc_priv *smfc;
+ struct ipu_smfc_priv *priv;
+ int i;
- smfc = devm_kzalloc(dev, sizeof(*smfc), GFP_KERNEL);
- if (!smfc)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- ipu->smfc_priv = smfc;
- spin_lock_init(&smfc->lock);
+ ipu->smfc_priv = priv;
+ spin_lock_init(&priv->lock);
+ priv->ipu = ipu;
- smfc->base = devm_ioremap(dev, base, PAGE_SIZE);
- if (!smfc->base)
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
return -ENOMEM;
- pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, smfc->base);
+ for (i = 0; i < 4; i++) {
+ priv->channel[i].priv = priv;
+ priv->channel[i].chno = i;
+ }
+
+ pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, priv->base);
return 0;
}