author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-20 14:06:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-20 14:06:06 -0700
commit    14aa02449064541217836b9f3d3295e241d5ae9c (patch)
tree      3456d1c397041cf86579cd2aed18de99f3240c81 /drivers
parent    79319a052cb0ae862954fe9f6e606417f1698ddb (diff)
parent    2c33ce009ca2389dbf0535d0672214d09738e35e (diff)
Merge branch 'drm-next-merged' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Highlights:

  Core:
   - Virtual GEM layer merged, this has been around for a long time, and
     it provides a software backed device that allows userspace to use
     it as a GEM shared memory handler.  This makes it a lot easier to
     do certain things when you have no GPU but still have to deal with
     DRI expectations.
   - atomic helper updates.
   - framebuffer modifier interface added.
   - i2c over auxch displayport fixes.
   - fb width/height confusion fixes.
   - new driver for ps8622/ps8625 bridge chips
   - lots of new panels

  i915:
   - more plane atomic conversion
   - vGPU guest support for XenGT
   - Skylake workarounds and fixes
   - Y-tiling support
   - work on dynamic pagetable allocation
   - EU count report param for gen9+
   - CHV fixes (no longer prelim)
   - remove ilk rc6
   - frontbuffer tracking for fbc
   - Displayport link rate refactoring
   - sprite colorkey refactor

  radeon:
   - Displayport MST support (not enabled by default)
   - non-ATOM native hw auxch support (DCE5+)
   - output csc support
   - new queries for userspace debug support
   - new VCE packet

  nouveau:
   - gk20a iommu support
   - gm107 graphics support
   - more gm20x bringup (waiting on signed nvidia fw).

  amdkfd:
   - multiple kgd instance support
   - use 64-bit time accessors

  msm:
   - stolen memory support
   - DSI and dual-DSI support
   - snapdragon 410 support

  exynos:
   - cleanups for atomic and pageflip

  imx-drm:
   - more media-bus formats
   - TV output prep
   - drm panel support

  tegra:
   - hw vblank counter using host1x syncpoints

  omap:
   - universal plane support
   - prep work for atomic modesetting

  rcar-du:
   - ported to atomic modesetting

  atmel-hlcdc:
   - ported to atomic modesetting
   - added suspend/resume support

  sti:
   - ported to atomic modesetting

  dwhdmi:
   - more compliant audio support
   - update rockchip phy support

  tda998x:
   - DT probing for attached crtcs
   - simplified EDID reading

  rockchip:
   - fixes

  adv7511:
   - fixes"

* 'drm-next-merged' of git://people.freedesktop.org/~airlied/linux: (689 commits)
  media-bus: Fixup RGB444_1X12, RGB565_1X16, and YUV8_1X24 media bus format
  drm/i915: Dont enable CS_PARSER_ERROR interrupts at all
  drm/i915: Move drm_framebuffer_unreference out of struct_mutex for takeover
  drm: fix trivial typo mistake
  drm: Make integer overflow checking cover universal cursor updates (v2)
  drm/nouveau/bios: fix fetching from acpi on certain systems
  drm/nouveau/gr/gm206: initial init+ctx code
  drm/nouveau/ce/gm206: enable support via gm204 code
  drm/nouveau/fifo/gm206: enable support via gm204 code
  drm/nouveau/gr/gm204: initial init+ctx code
  drm/nouveau: support for buffer moves via MaxwellDmaCopyA
  drm/nouveau/ce/gm204: initial support
  drm/nouveau: add support for gm20x fifo channels
  drm/nouveau/fifo/gm204: initial support
  drm/nouveau/gr/gk104-: prevent reading non-existent regs in intr handler
  drm/nouveau/gr/gm107: very slightly demagic part of attrib cb setup
  drm/nouveau/gr/gk104-: correct crop/zrop num_active_fbps setting
  drm/nouveau/gr/gf100-: add symbolic names for classes
  drm/nouveau/gr/gm107: support tpc "strand" ctxsw in gpccs ucode
  drm/nouveau/gr/gf100-: support mmio access with gpc offset from gpccs ucode
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/gpu/drm/Kconfig | 9
-rw-r--r-- drivers/gpu/drm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 12
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 64
-rw-r--r-- drivers/gpu/drm/armada/armada_output.h | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 311
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 40
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 62
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c | 4
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h | 3
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 41
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 640
-rw-r--r-- drivers/gpu/drm/bochs/bochs_hw.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 11
-rw-r--r-- drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r-- drivers/gpu/drm/bridge/dw_hdmi.c | 54
-rw-r--r-- drivers/gpu/drm/bridge/ps8622.c | 684
-rw-r--r-- drivers/gpu/drm/bridge/ptn3460.c | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 205
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 660
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 2
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 114
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 34
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 80
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 13
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_cma_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 62
-rw-r--r-- drivers/gpu/drm/drm_info.c | 1
-rw-r--r-- drivers/gpu/drm/drm_ioc32.c | 2
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 63
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 58
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 12
-rw-r--r-- drivers/gpu/drm/drm_of.c | 10
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 1
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 39
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 61
-rw-r--r-- drivers/gpu/drm/drm_vm.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c | 178
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp_core.c | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 101
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.h | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 27
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.h | 40
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 251
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.h | 15
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 44
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c | 78
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.h | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 136
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 260
-rw-r--r-- drivers/gpu/drm/exynos/regs-mixer.h | 2
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_lvds.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 10
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_dsi_output.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_lvds.c | 2
-rw-r--r-- drivers/gpu/drm/i2c/adv7511.c | 197
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 101
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 75
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 352
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 242
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 178
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 392
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 545
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 115
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 194
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 1068
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 160
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 335
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 22
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 294
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 199
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 215
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 78
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 107
-rw-r--r-- drivers/gpu/drm/i915/i915_ums.c | 552
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 264
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.h | 91
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic_plane.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 111
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1788
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 459
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 38
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 126
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 184
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 32
-rw-r--r-- drivers/gpu/drm/i915/intel_frontbuffer.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 208
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1286
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 351
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 266
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 476
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 69
-rw-r--r-- drivers/gpu/drm/imx/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/imx/dw_hdmi-imx.c | 14
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 25
-rw-r--r-- drivers/gpu/drm/imx/imx-drm.h | 10
-rw-r--r-- drivers/gpu/drm/imx/imx-ldb.c | 196
-rw-r--r-- drivers/gpu/drm/imx/imx-tve.c | 6
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c | 24
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 7
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.h | 2
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c | 13
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 2
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 11
-rw-r--r-- drivers/gpu/drm/msm/Makefile | 5
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.c | 212
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.h | 117
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.xml.h | 418
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 1993
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 705
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_phy.c | 352
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 34
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 6
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 399
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 102
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 18
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 343
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 86
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 315
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 75
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 83
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 26
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 200
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 75
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 10
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 64
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 100
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 29
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 25
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 5
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/crtc.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dac.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dfp.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/disp.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/class.h | 10
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.c | 79
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.h | 18
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nv84_fence.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c | 173
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c | 57
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 29
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c | 1054
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c | 83
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc | 114
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h | 294
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h | 354
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h | 354
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h | 354
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h | 230
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5 | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h | 480
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc | 32
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c | 17
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c | 45
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c | 47
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c | 26
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c | 387
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h | 44
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 26
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c | 149
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 440
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c | 95
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_connector.c | 12
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_crtc.c | 622
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_priv.h | 8
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 80
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.h | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 241
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.h | 23
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.c | 66
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fbdev.c | 57
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 20
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_irq.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.c | 146
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 256
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/Kconfig | 8
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 25
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 171
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.c | 55
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 132
-rw-r--r-- drivers/gpu/drm/radeon/cikd.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 129
-rw-r--r-- drivers/gpu/drm/radeon/evergreend.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/kv_dpm.c | 23
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 29
-rw-r--r-- drivers/gpu/drm/radeon/ni_dpm.c | 36
-rw-r--r-- drivers/gpu/drm/radeon/ni_reg.h | 44
-rw-r--r-- drivers/gpu/drm/radeon/nid.h | 47
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 49
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 30
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_audio.c | 24
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 132
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_dp_auxch.c | 206
-rw-r--r-- drivers/gpu/drm/radeon/radeon_dp_mst.c | 782
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kfd.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 29
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mn.c | 102
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 72
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/rs780_dpm.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/rv6xx_dpm.c | 46
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dpm.c | 44
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 130
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 36
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/sumo_dpm.c | 28
-rw-r--r-- drivers/gpu/drm/radeon/trinity_dpm.c | 25
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 400
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 20
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 16
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 71
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_group.h | 5
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c | 9
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 65
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 349
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 9
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 18
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 422
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.h | 69
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 9
-rw-r--r-- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 14
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 30
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 7
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 17
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 3
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 143
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_crtc.c | 175
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_drv.c | 86
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_drv.h | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_plane.c | 66
-rw-r--r-- drivers/gpu/drm/sti/sti_dvo.c | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c | 6
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 105
-rw-r--r-- drivers/gpu/drm/tegra/dc.h | 7
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 22
-rw-r--r-- drivers/gpu/drm/tegra/drm.h | 4
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.h | 2
-rw-r--r-- drivers/gpu/drm/tegra/sor.c | 202
-rw-r--r-- drivers/gpu/drm/vgem/Makefile | 4
-rw-r--r-- drivers/gpu/drm/vgem/vgem_dma_buf.c | 94
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 364
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.h (renamed from drivers/gpu/drm/i915/intel_dsi_cmd.h) | 46
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4
-rw-r--r-- drivers/gpu/host1x/syncpt.c | 6
-rw-r--r-- drivers/gpu/ipu-v3/ipu-dc.c | 18
-rw-r--r-- drivers/gpu/ipu-v3/ipu-di.c | 9
-rw-r--r-- drivers/gpu/ipu-v3/ipu-ic.c | 4
358 files changed, 24488 insertions, 10124 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 151a050129e7..47f2ce81b412 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -165,6 +165,15 @@ config DRM_SAVAGE
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
+config DRM_VGEM
+ tristate "Virtual GEM provider"
+ depends on DRM
+ help
+ Choose this option to get a virtual graphics memory manager,
+ as used by Mesa's software renderer for enhanced performance.
+ If M is selected the module will be called vgem.
+
+
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2c239b99de64..7d4944e1a60c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
+obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5c50aa8a8908..19a4fba46e4e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -435,21 +435,22 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
{
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
- struct timespec time;
+ struct timespec64 time;
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
/* Reading GPU clock counter from KGD */
- args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter =
+ dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
/* No access to rdtsc. Using raw monotonic time */
- getrawmonotonic(&time);
- args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+ getrawmonotonic64(&time);
+ args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
- get_monotonic_boottime(&time);
- args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
+ get_monotonic_boottime64(&time);
+ args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
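
The timespec64 change above is the "use 64-bit time accessors" item from the merge highlights: on 32-bit kernels, struct timespec carries a 32-bit time_t that overflows in 2038, while struct timespec64 does not. A minimal sketch of the pattern, using the same accessors as the patch (the fill_counters() helper is hypothetical, not part of this series):

    #include <linux/time64.h>       /* struct timespec64, timespec64_to_ns() */
    #include <linux/timekeeping.h>  /* getrawmonotonic64(), get_monotonic_boottime64() */

    /* Hypothetical helper mirroring kfd_ioctl_get_clock_counters(): read both
     * clocks into a 2038-safe timespec64, then convert to nanoseconds. */
    static void fill_counters(u64 *cpu_ns, u64 *boot_ns)
    {
            struct timespec64 time;

            getrawmonotonic64(&time);            /* raw monotonic clock */
            *cpu_ns = timespec64_to_ns(&time);

            get_monotonic_boottime64(&time);     /* monotonic, counting suspend */
            *boot_ns = timespec64_to_ns(&time);
    }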
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 5bc32c26b989..ca7f2d3af2ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -94,7 +94,8 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
return NULL;
}
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
struct kfd_dev *kfd;
@@ -112,6 +113,11 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
kfd->device_info = device_info;
kfd->pdev = pdev;
kfd->init_complete = false;
+ kfd->kfd2kgd = f2g;
+
+ mutex_init(&kfd->doorbell_mutex);
+ memset(&kfd->doorbell_available_index, 0,
+ sizeof(kfd->doorbell_available_index));
return kfd;
}
@@ -200,8 +206,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
/* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
- if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
+ if (kfd->kfd2kgd->init_gtt_mem_allocation(
+ kfd->kgd, size, &kfd->gtt_mem,
+ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
dev_err(kfd_device,
"Could not allocate %d bytes for device (%x:%x)\n",
size, kfd->pdev->vendor, kfd->pdev->device);
@@ -270,7 +277,7 @@ device_iommu_pasid_error:
kfd_topology_add_device_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device (%x:%x) NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -285,7 +292,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
amd_iommu_free_device(kfd->pdev);
kfd_topology_remove_device(kfd);
kfd_gtt_sa_fini(kfd);
- kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
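
This hunk is the core of the "multiple kgd instance support" item: the callback table amdkfd uses to reach the GPU driver is no longer a module-global kfd2kgd pointer but lives in each struct kfd_dev, handed over at probe time. Every call site then dereferences through the device, as in this trimmed sketch (struct reduced to the two relevant members):

    /* Trimmed, hypothetical stand-in for struct kfd_dev after this patch. */
    struct kfd_dev_sketch {
            struct kgd_dev *kgd;                  /* opaque handle into the kgd driver */
            const struct kfd2kgd_calls *kfd2kgd;  /* per-device callback table */
    };

    static uint64_t vmem_size(struct kfd_dev_sketch *dev)
    {
            /* before: kfd2kgd->get_vmem_size(dev->kgd) through one global
             * table, which made registering a second kgd driver impossible */
            return dev->kfd2kgd->get_vmem_size(dev->kgd);
    }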
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d8135adb2238..69af73f15310 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -82,7 +82,8 @@ static inline unsigned int get_pipes_num_cpsch(void)
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
+ return dqm->dev->kfd2kgd->program_sh_mem_settings(
+ dqm->dev->kgd, qpd->vmid,
qpd->sh_mem_config,
qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit,
@@ -457,9 +458,12 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
{
uint32_t pasid_mapping;
- pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
- ATC_VMID_PASID_MAPPING_VALID;
- return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
+ pasid_mapping = (pasid == 0) ? 0 :
+ (uint32_t)pasid |
+ ATC_VMID_PASID_MAPPING_VALID;
+
+ return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
+ dqm->dev->kgd, pasid_mapping,
vmid);
}
@@ -511,7 +515,7 @@ int init_pipelines(struct device_queue_manager *dqm,
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
- kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
+ dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
@@ -905,7 +909,7 @@ out:
return retval;
}
-static int fence_wait_timeout(unsigned int *fence_addr,
+static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned long timeout)
{
@@ -961,7 +965,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
- fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+ amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 1a9b355dd114..17e56dcc8540 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -32,9 +32,6 @@
* and that's assures that any user process won't get access to the
* kernel doorbells page
*/
-static DEFINE_MUTEX(doorbell_mutex);
-static unsigned long doorbell_available_index[
- DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
#define KERNEL_DOORBELL_PASID 1
#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
@@ -170,12 +167,12 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
BUG_ON(!kfd || !doorbell_off);
- mutex_lock(&doorbell_mutex);
- inx = find_first_zero_bit(doorbell_available_index,
+ mutex_lock(&kfd->doorbell_mutex);
+ inx = find_first_zero_bit(kfd->doorbell_available_index,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
- __set_bit(inx, doorbell_available_index);
- mutex_unlock(&doorbell_mutex);
+ __set_bit(inx, kfd->doorbell_available_index);
+ mutex_unlock(&kfd->doorbell_mutex);
if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return NULL;
@@ -203,9 +200,9 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
- mutex_lock(&doorbell_mutex);
- __clear_bit(inx, doorbell_available_index);
- mutex_unlock(&doorbell_mutex);
+ mutex_lock(&kfd->doorbell_mutex);
+ __clear_bit(inx, kfd->doorbell_available_index);
+ mutex_unlock(&kfd->doorbell_mutex);
}
inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
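
The doorbell state gets the same treatment: a file-scope mutex and bitmap would be shared by every GPU, so both move into struct kfd_dev. The allocator itself is a standard bitmap pattern; a sketch assuming a MAX_Q stand-in for KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, with an explicit bounds check before __set_bit() (the original sets the bit first and checks after):

    #include <linux/bitops.h>  /* find_first_zero_bit(), __set_bit() */
    #include <linux/errno.h>
    #include <linux/kernel.h>  /* DIV_ROUND_UP() */
    #include <linux/mutex.h>

    #define MAX_Q 1024  /* stand-in for KFD_MAX_NUM_OF_QUEUES_PER_PROCESS */

    struct doorbell_alloc {
            struct mutex lock;
            unsigned long bitmap[DIV_ROUND_UP(MAX_Q, BITS_PER_LONG)];
    };

    static int doorbell_get(struct doorbell_alloc *a)
    {
            unsigned int inx;

            mutex_lock(&a->lock);
            inx = find_first_zero_bit(a->bitmap, MAX_Q);
            if (inx < MAX_Q)
                    __set_bit(inx, a->bitmap);   /* serialized by a->lock */
            mutex_unlock(&a->lock);

            return inx < MAX_Q ? inx : -ENOSPC;
    }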
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 3f34ae16f075..4e0a68f13a77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -34,7 +34,6 @@
#define KFD_DRIVER_MINOR 7
#define KFD_DRIVER_PATCHLEVEL 1
-const struct kfd2kgd_calls *kfd2kgd;
static const struct kgd2kfd_calls kgd2kfd = {
.exit = kgd2kfd_exit,
.probe = kgd2kfd_probe,
@@ -55,9 +54,7 @@ module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
-bool kgd2kfd_init(unsigned interface_version,
- const struct kfd2kgd_calls *f2g,
- const struct kgd2kfd_calls **g2f)
+bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
{
/*
* Only one interface version is supported,
@@ -66,11 +63,6 @@ bool kgd2kfd_init(unsigned interface_version,
if (interface_version != KFD_INTERFACE_VERSION)
return false;
- /* Protection against multiple amd kgd loads */
- if (kfd2kgd)
- return true;
-
- kfd2kgd = f2g;
*g2f = &kgd2kfd;
return true;
@@ -85,8 +77,6 @@ static int __init kfd_module_init(void)
{
int err;
- kfd2kgd = NULL;
-
/* Verify module parameters */
if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index a09e18a339f3..434979428fc0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -151,14 +151,15 @@ static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr)
{
- return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ return mm->dev->kfd2kgd->hqd_load
+ (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr)
{
- return kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
}
static int update_mqd(struct mqd_manager *mm, void *mqd,
@@ -245,7 +246,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
pipe_id, queue_id);
}
@@ -258,7 +259,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -266,7 +267,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
+ return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
@@ -275,7 +276,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 5a44f2fecf38..f21fccebd75b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -148,6 +148,11 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
+ const struct kfd2kgd_calls *kfd2kgd;
+ struct mutex doorbell_mutex;
+ unsigned long doorbell_available_index[DIV_ROUND_UP(
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+
void *gtt_mem;
uint64_t gtt_start_gpu_addr;
void *gtt_start_cpu_ptr;
@@ -164,13 +169,12 @@ struct kfd_dev {
/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
- const struct kgd2kfd_shared_resources *gpu_resources);
+ const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-extern const struct kfd2kgd_calls *kfd2kgd;
-
enum kfd_mempool {
KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -378,8 +382,6 @@ struct qcm_process_device {
/* The Device Queue Manager that owns this data */
struct device_queue_manager *dqm;
struct process_queue_manager *pqm;
- /* Device Queue Manager lock */
- struct mutex *lock;
/* Queues list */
struct list_head queues_list;
struct list_head priv_queue_list;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a369c149d172..945d6226dc51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -162,10 +162,16 @@ static void kfd_process_wq_release(struct work_struct *work)
p = my_work->p;
+ pr_debug("Releasing process (pasid %d) in workqueue\n",
+ p->pasid);
+
mutex_lock(&p->mutex);
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
+ pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
+ pdd->dev->id, p->pasid);
+
amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
list_del(&pdd->per_device_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 498399323a8c..661c6605d31b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -726,13 +726,14 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
}
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
- kfd2kgd->get_max_engine_clock_in_mhz(
+ dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
- kfd2kgd->get_vmem_size(dev->gpu->kgd));
+ dev->gpu->kfd2kgd->get_vmem_size(
+ dev->gpu->kgd));
sysfs_show_32bit_prop(buffer, "fw_version",
- kfd2kgd->get_fw_version(
+ dev->gpu->kfd2kgd->get_fw_version(
dev->gpu->kgd,
KGD_ENGINE_MEC1));
}
@@ -1099,8 +1100,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
buf[2] = gpu->pdev->subsystem_device;
buf[3] = gpu->pdev->device;
buf[4] = gpu->pdev->bus->number;
- buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
- buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+ buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
+ & 0xffffffff);
+ buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
for (i = 0, hashout = 0; i < 7; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
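
For reference, the gpu_id generated above folds the identifying PCI fields and both 32-bit halves of the VRAM size into one short hash. A condensed sketch (a hypothetical refactor that reads get_vmem_size() once instead of twice; KFD_GPU_ID_HASH_WIDTH is 16 in kfd_priv.h):

    #include <linux/hash.h>    /* hash_32() */
    #include <linux/kernel.h>  /* lower_32_bits(), upper_32_bits() */

    static uint32_t gpu_id_hash(const uint32_t pci[5], uint64_t vmem_size)
    {
            uint32_t buf[7] = {
                    pci[0], pci[1], pci[2], pci[3], pci[4],
                    lower_32_bits(vmem_size),
                    upper_32_bits(vmem_size),
            };
            uint32_t hashout = 0;
            int i;

            /* XOR-fold all seven words down to KFD_GPU_ID_HASH_WIDTH bits */
            for (i = 0; i < 7; i++)
                    hashout ^= hash_32(buf[i], 16 /* KFD_GPU_ID_HASH_WIDTH */);

            return hashout;
    }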
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 239bc16a1ddd..dabd94446b7b 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -77,37 +77,6 @@ struct kgd2kfd_shared_resources {
};
/**
- * struct kgd2kfd_calls
- *
- * @exit: Notifies amdkfd that kgd module is unloaded
- *
- * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
- *
- * @device_init: Initialize the newly probed device (if it is a device that
- * amdkfd supports)
- *
- * @device_exit: Notifies amdkfd about a removal of a kgd device
- *
- * @suspend: Notifies amdkfd about a suspend action done to a kgd device
- *
- * @resume: Notifies amdkfd about a resume action done to a kgd device
- *
- * This structure contains function callback pointers so the kgd driver
- * will notify to the amdkfd about certain status changes.
- *
- */
-struct kgd2kfd_calls {
- void (*exit)(void);
- struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
- bool (*device_init)(struct kfd_dev *kfd,
- const struct kgd2kfd_shared_resources *gpu_resources);
- void (*device_exit)(struct kfd_dev *kfd);
- void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
- void (*suspend)(struct kfd_dev *kfd);
- int (*resume)(struct kfd_dev *kfd);
-};
-
-/**
* struct kfd2kgd_calls
*
* @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
@@ -196,8 +165,39 @@ struct kfd2kgd_calls {
enum kgd_engine_type type);
};
+/**
+ * struct kgd2kfd_calls
+ *
+ * @exit: Notifies amdkfd that kgd module is unloaded
+ *
+ * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
+ *
+ * @device_init: Initialize the newly probed device (if it is a device that
+ * amdkfd supports)
+ *
+ * @device_exit: Notifies amdkfd about a removal of a kgd device
+ *
+ * @suspend: Notifies amdkfd about a suspend action done to a kgd device
+ *
+ * @resume: Notifies amdkfd about a resume action done to a kgd device
+ *
+ * This structure contains function callback pointers so the kgd driver
+ * will notify to the amdkfd about certain status changes.
+ *
+ */
+struct kgd2kfd_calls {
+ void (*exit)(void);
+ struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
+ const struct kfd2kgd_calls *f2g);
+ bool (*device_init)(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+ void (*device_exit)(struct kfd_dev *kfd);
+ void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+ void (*suspend)(struct kfd_dev *kfd);
+ int (*resume)(struct kfd_dev *kfd);
+};
+
bool kgd2kfd_init(unsigned interface_version,
- const struct kfd2kgd_calls *f2g,
const struct kgd2kfd_calls **g2f);
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
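
Taken together, the interface now works in two phases: kgd2kfd_init() stays a one-shot, module-level handshake (minus the f2g argument), while each device probe passes the GPU driver's own kfd2kgd_calls table down to amdkfd. From a kgd driver's side the flow looks roughly like this (my_kfd2kgd names that driver's callback table, as radeon_kfd.c defines one for radeon; error handling trimmed):

    static const struct kgd2kfd_calls *kgd2kfd;
    extern const struct kfd2kgd_calls my_kfd2kgd;  /* this driver's callbacks */

    static bool my_kgd_module_init(void)
    {
            /* once per module: resolve amdkfd's entry points */
            return kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
    }

    static struct kfd_dev *my_kgd_probe_one(struct kgd_dev *kgd,
                                            struct pci_dev *pdev)
    {
            /* once per device: hand over this driver's callback table,
             * so several kgd instances can coexist */
            return kgd2kfd->probe(kgd, pdev, &my_kfd2kgd);
    }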
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
index 4126d43b5057..3c4023e142d0 100644
--- a/drivers/gpu/drm/armada/armada_output.h
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -9,7 +9,7 @@
#define ARMADA_CONNETOR_H
#define encoder_helper_funcs(encoder) \
- ((struct drm_encoder_helper_funcs *)encoder->helper_private)
+ ((const struct drm_encoder_helper_funcs *)encoder->helper_private)
struct armada_output_type {
int connector_type;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index b3e3068c6ec0..f69b92535505 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -21,6 +21,7 @@
#include <linux/clk.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -37,14 +38,14 @@
* @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
* @event: pointer to the current page flip event
* @id: CRTC id (returned by drm_crtc_index)
- * @dpms: DPMS mode
+ * @enabled: CRTC state
*/
struct atmel_hlcdc_crtc {
struct drm_crtc base;
struct atmel_hlcdc_dc *dc;
struct drm_pending_vblank_event *event;
int id;
- int dpms;
+ bool enabled;
};
static inline struct atmel_hlcdc_crtc *
@@ -53,86 +54,17 @@ drm_crtc_to_atmel_hlcdc_crtc(struct drm_crtc *crtc)
return container_of(crtc, struct atmel_hlcdc_crtc, base);
}
-static void atmel_hlcdc_crtc_dpms(struct drm_crtc *c, int mode)
-{
- struct drm_device *dev = c->dev;
- struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- struct regmap *regmap = crtc->dc->hlcdc->regmap;
- unsigned int status;
-
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (crtc->dpms == mode)
- return;
-
- pm_runtime_get_sync(dev->dev);
-
- if (mode != DRM_MODE_DPMS_ON) {
- regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- (status & ATMEL_HLCDC_DISP))
- cpu_relax();
-
- regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- (status & ATMEL_HLCDC_SYNC))
- cpu_relax();
-
- regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- (status & ATMEL_HLCDC_PIXEL_CLK))
- cpu_relax();
-
- clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
-
- pm_runtime_allow(dev->dev);
- } else {
- pm_runtime_forbid(dev->dev);
-
- clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
-
- regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- !(status & ATMEL_HLCDC_PIXEL_CLK))
- cpu_relax();
-
-
- regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- !(status & ATMEL_HLCDC_SYNC))
- cpu_relax();
-
- regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- !(status & ATMEL_HLCDC_DISP))
- cpu_relax();
- }
-
- pm_runtime_put_sync(dev->dev);
-
- crtc->dpms = mode;
-}
-
-static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
- struct drm_plane *plane = c->primary;
- struct drm_framebuffer *fb;
+ struct drm_display_mode *adj = &c->state->adjusted_mode;
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
unsigned int cfg;
int div;
- if (atmel_hlcdc_dc_mode_valid(crtc->dc, adj) != MODE_OK)
- return -EINVAL;
-
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start;
@@ -156,7 +88,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
cfg = 0;
prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
- mode_rate = mode->crtc_clock * 1000;
+ mode_rate = adj->crtc_clock * 1000;
if ((prate / 2) < mode_rate) {
prate *= 2;
cfg |= ATMEL_HLCDC_CLKSEL;
@@ -174,10 +106,10 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
cfg = 0;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (adj->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= ATMEL_HLCDC_VSPOL;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (adj->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= ATMEL_HLCDC_HSPOL;
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
@@ -187,77 +119,155 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK,
cfg);
+}
+
+static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
+{
+ struct drm_device *dev = c->dev;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct regmap *regmap = crtc->dc->hlcdc->regmap;
+ unsigned int status;
+
+ if (!crtc->enabled)
+ return;
+
+ drm_crtc_vblank_off(c);
+
+ pm_runtime_get_sync(dev->dev);
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_DISP))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_SYNC))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_PIXEL_CLK))
+ cpu_relax();
+
+ clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
+ pinctrl_pm_select_sleep_state(dev->dev);
+
+ pm_runtime_allow(dev->dev);
- fb = plane->fb;
- plane->fb = old_fb;
+ pm_runtime_put_sync(dev->dev);
- return atmel_hlcdc_plane_update_with_mode(plane, c, fb, 0, 0,
- adj->hdisplay, adj->vdisplay,
- x << 16, y << 16,
- adj->hdisplay << 16,
- adj->vdisplay << 16,
- adj);
+ crtc->enabled = false;
}
-int atmel_hlcdc_crtc_mode_set_base(struct drm_crtc *c, int x, int y,
- struct drm_framebuffer *old_fb)
+static void atmel_hlcdc_crtc_enable(struct drm_crtc *c)
{
- struct drm_plane *plane = c->primary;
- struct drm_framebuffer *fb = plane->fb;
- struct drm_display_mode *mode = &c->hwmode;
-
- plane->fb = old_fb;
-
- return plane->funcs->update_plane(plane, c, fb,
- 0, 0,
- mode->hdisplay,
- mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16,
- mode->vdisplay << 16);
+ struct drm_device *dev = c->dev;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct regmap *regmap = crtc->dc->hlcdc->regmap;
+ unsigned int status;
+
+ if (crtc->enabled)
+ return;
+
+ pm_runtime_get_sync(dev->dev);
+
+ pm_runtime_forbid(dev->dev);
+
+ pinctrl_pm_select_default_state(dev->dev);
+ clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_PIXEL_CLK))
+ cpu_relax();
+
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_SYNC))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_DISP))
+ cpu_relax();
+
+ pm_runtime_put_sync(dev->dev);
+
+ drm_crtc_vblank_on(c);
+
+ crtc->enabled = true;
}
-static void atmel_hlcdc_crtc_prepare(struct drm_crtc *crtc)
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *c)
{
- atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+ if (crtc->enabled) {
+ atmel_hlcdc_crtc_disable(c);
+ /* save enable state for resume */
+ crtc->enabled = true;
+ }
}
-static void atmel_hlcdc_crtc_commit(struct drm_crtc *crtc)
+void atmel_hlcdc_crtc_resume(struct drm_crtc *c)
{
- atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+ if (crtc->enabled) {
+ crtc->enabled = false;
+ atmel_hlcdc_crtc_enable(c);
+ }
}
-static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
+ struct drm_crtc_state *s)
{
- return true;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+ if (atmel_hlcdc_dc_mode_valid(crtc->dc, &s->adjusted_mode) != MODE_OK)
+ return -EINVAL;
+
+ return atmel_hlcdc_plane_prepare_disc_area(s);
}
-static void atmel_hlcdc_crtc_disable(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
{
- struct drm_plane *plane;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- crtc->primary->funcs->disable_plane(crtc->primary);
+ if (c->state->event) {
+ c->state->event->pipe = drm_crtc_index(c);
- drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
- if (plane->crtc != crtc)
- continue;
+ WARN_ON(drm_crtc_vblank_get(c) != 0);
- plane->funcs->disable_plane(crtc->primary);
- plane->crtc = NULL;
+ crtc->event = c->state->event;
+ c->state->event = NULL;
}
}
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+{
+ /* TODO: write common plane control register if available */
+}
+
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
.mode_fixup = atmel_hlcdc_crtc_mode_fixup,
- .dpms = atmel_hlcdc_crtc_dpms,
- .mode_set = atmel_hlcdc_crtc_mode_set,
- .mode_set_base = atmel_hlcdc_crtc_mode_set_base,
- .prepare = atmel_hlcdc_crtc_prepare,
- .commit = atmel_hlcdc_crtc_commit,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
.disable = atmel_hlcdc_crtc_disable,
+ .enable = atmel_hlcdc_crtc_enable,
+ .atomic_check = atmel_hlcdc_crtc_atomic_check,
+ .atomic_begin = atmel_hlcdc_crtc_atomic_begin,
+ .atomic_flush = atmel_hlcdc_crtc_atomic_flush,
};
static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
@@ -306,61 +316,13 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
-static int atmel_hlcdc_crtc_page_flip(struct drm_crtc *c,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- struct atmel_hlcdc_plane_update_req req;
- struct drm_plane *plane = c->primary;
- struct drm_device *dev = c->dev;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (crtc->event)
- ret = -EBUSY;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- if (ret)
- return ret;
-
- memset(&req, 0, sizeof(req));
- req.crtc_x = 0;
- req.crtc_y = 0;
- req.crtc_h = c->mode.crtc_vdisplay;
- req.crtc_w = c->mode.crtc_hdisplay;
- req.src_x = c->x << 16;
- req.src_y = c->y << 16;
- req.src_w = req.crtc_w << 16;
- req.src_h = req.crtc_h << 16;
- req.fb = fb;
-
- ret = atmel_hlcdc_plane_prepare_update_req(plane, &req, &c->hwmode);
- if (ret)
- return ret;
-
- if (event) {
- drm_vblank_get(c->dev, crtc->id);
- spin_lock_irqsave(&dev->event_lock, flags);
- crtc->event = event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- ret = atmel_hlcdc_plane_apply_update_req(plane, &req);
- if (ret)
- crtc->event = NULL;
- else
- plane->fb = fb;
-
- return ret;
-}
-
static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
- .page_flip = atmel_hlcdc_crtc_page_flip,
- .set_config = drm_crtc_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_config = drm_atomic_helper_set_config,
.destroy = atmel_hlcdc_crtc_destroy,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
@@ -375,7 +337,6 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
if (!crtc)
return -ENOMEM;
- crtc->dpms = DRM_MODE_DPMS_OFF;
crtc->dc = dc;
ret = drm_crtc_init_with_planes(dev, &crtc->base,
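
The net effect of the conversion above: the DPMS, mode_set and page_flip paths the driver used to implement by hand are routed through the atomic helpers, and the driver keeps only enable/disable plus the atomic_* hooks. Condensed to its shape (a sketch with my_* placeholders; the real tables are in the hunk above):

    static const struct drm_crtc_helper_funcs helper_funcs_sketch = {
            .mode_fixup     = my_mode_fixup,          /* no-op, returns true */
            .mode_set       = drm_helper_crtc_mode_set,       /* transitional helpers */
            .mode_set_base  = drm_helper_crtc_mode_set_base,
            .mode_set_nofb  = my_mode_set_nofb,       /* timings from c->state->adjusted_mode */
            .enable         = my_enable,              /* replaces DPMS_ON */
            .disable        = my_disable,             /* replaces DPMS_OFF */
            .atomic_check   = my_atomic_check,        /* validate the requested state */
            .atomic_begin   = my_atomic_begin,        /* grab vblank, latch the event */
            .atomic_flush   = my_atomic_flush,
    };

    static const struct drm_crtc_funcs crtc_funcs_sketch = {
            .set_config             = drm_atomic_helper_set_config,
            .page_flip              = drm_atomic_helper_page_flip,
            .reset                  = drm_atomic_helper_crtc_reset,
            .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
            .destroy                = my_destroy,
    };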
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c1cb17493e0d..60b0c13d7ff5 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -222,6 +222,8 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
.output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
@@ -317,6 +319,8 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
goto err_periph_clk_disable;
}
+ drm_mode_config_reset(dev);
+
ret = drm_vblank_init(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
@@ -555,6 +559,41 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct drm_crtc *crtc;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ drm_modeset_lock_all(drm_dev);
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+ atmel_hlcdc_crtc_suspend(crtc);
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+
+static int atmel_hlcdc_dc_drm_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct drm_crtc *crtc;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ drm_modeset_lock_all(drm_dev);
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+ atmel_hlcdc_crtc_resume(crtc);
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
+ atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume);
+
static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
{ .compatible = "atmel,hlcdc-display-controller" },
{ },
@@ -565,6 +604,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.remove = atmel_hlcdc_dc_drm_remove,
.driver = {
.name = "atmel-hlcdc-display-controller",
+ .pm = &atmel_hlcdc_dc_drm_pm_ops,
.of_match_table = atmel_hlcdc_dc_of_match,
},
};
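
The new PM hooks are wired up with SIMPLE_DEV_PM_OPS, which populates only the system-sleep callbacks of a dev_pm_ops. Roughly what that macro line expands to (sketch; the real macro goes through SET_SYSTEM_SLEEP_PM_OPS, which also points freeze/thaw/poweroff/restore at the same pair and compiles away without CONFIG_PM_SLEEP):

    #include <linux/pm.h>

    static const struct dev_pm_ops atmel_hlcdc_dc_drm_pm_ops_sketch = {
            .suspend = atmel_hlcdc_dc_drm_suspend,  /* disables each enabled CRTC */
            .resume  = atmel_hlcdc_dc_drm_resume,   /* re-enables what suspend stopped */
    };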
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 7bc96af3397a..cf6b375bc38d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -26,11 +26,14 @@
#include <linux/irqdomain.h>
#include <linux/pwm.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
#include "atmel_hlcdc_layer.h"
@@ -69,7 +72,6 @@ struct atmel_hlcdc_dc_desc {
*/
struct atmel_hlcdc_plane_properties {
struct drm_property *alpha;
- struct drm_property *rotation;
};
/**
@@ -84,7 +86,6 @@ struct atmel_hlcdc_plane {
struct drm_plane base;
struct atmel_hlcdc_layer layer;
struct atmel_hlcdc_plane_properties *properties;
- unsigned int rotation;
};
static inline struct atmel_hlcdc_plane *
@@ -100,43 +101,6 @@ atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *l)
}
/**
- * Atmel HLCDC Plane update request structure.
- *
- * @crtc_x: x position of the plane relative to the CRTC
- * @crtc_y: y position of the plane relative to the CRTC
- * @crtc_w: visible width of the plane
- * @crtc_h: visible height of the plane
- * @src_x: x buffer position
- * @src_y: y buffer position
- * @src_w: buffer width
- * @src_h: buffer height
- * @fb: framebuffer object object
- * @bpp: bytes per pixel deduced from pixel_format
- * @offsets: offsets to apply to the GEM buffers
- * @xstride: value to add to the pixel pointer between each line
- * @pstride: value to add to the pixel pointer between each pixel
- * @nplanes: number of planes (deduced from pixel_format)
- */
-struct atmel_hlcdc_plane_update_req {
- int crtc_x;
- int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
- uint32_t src_x;
- uint32_t src_y;
- uint32_t src_w;
- uint32_t src_h;
- struct drm_framebuffer *fb;
-
- /* These fields are private and should not be touched */
- int bpp[ATMEL_HLCDC_MAX_PLANES];
- unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
- int xstride[ATMEL_HLCDC_MAX_PLANES];
- int pstride[ATMEL_HLCDC_MAX_PLANES];
- int nplanes;
-};
-
-/**
* Atmel HLCDC Planes.
*
* This structure stores the instantiated HLCDC Planes and can be accessed by
@@ -184,28 +148,16 @@ int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
struct atmel_hlcdc_planes *
atmel_hlcdc_create_planes(struct drm_device *dev);
-int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req,
- const struct drm_display_mode *mode);
-
-int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req);
-
-int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- const struct drm_display_mode *mode);
+int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state);
void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
struct drm_file *file);
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc);
+void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc);
+
int atmel_hlcdc_crtc_create(struct drm_device *dev);
int atmel_hlcdc_create_outputs(struct drm_device *dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index e79bd9ba474b..377e43cea9dd 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -298,7 +298,7 @@ void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
spin_unlock_irqrestore(&layer->lock, flags);
}
-int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
+void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
struct atmel_hlcdc_layer_update *upd = &layer->update;
@@ -341,8 +341,6 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
dma->status = ATMEL_HLCDC_LAYER_DISABLED;
spin_unlock_irqrestore(&layer->lock, flags);
-
- return 0;
}
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
index 27e56c0862ec..9beabc940bce 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
@@ -120,6 +120,7 @@
#define ATMEL_HLCDC_LAYER_DISCEN BIT(11)
#define ATMEL_HLCDC_LAYER_GA_SHIFT 16
#define ATMEL_HLCDC_LAYER_GA_MASK GENMASK(23, ATMEL_HLCDC_LAYER_GA_SHIFT)
+#define ATMEL_HLCDC_LAYER_GA(x) ((x) << ATMEL_HLCDC_LAYER_GA_SHIFT)
#define ATMEL_HLCDC_LAYER_CSC_CFG(p, o) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.csc + o)
@@ -376,7 +377,7 @@ int atmel_hlcdc_layer_init(struct drm_device *dev,
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
struct atmel_hlcdc_layer *layer);
-int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
+void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index c402192362c5..9c4513005310 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -86,25 +86,22 @@ atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output)
return container_of(output, struct atmel_hlcdc_panel, base);
}
-static void atmel_hlcdc_panel_encoder_dpms(struct drm_encoder *encoder,
- int mode)
+static void atmel_hlcdc_panel_encoder_enable(struct drm_encoder *encoder)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == rgb->dpms)
- return;
+ drm_panel_enable(panel->panel);
+}
- if (mode != DRM_MODE_DPMS_ON)
- drm_panel_disable(panel->panel);
- else
- drm_panel_enable(panel->panel);
+static void atmel_hlcdc_panel_encoder_disable(struct drm_encoder *encoder)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
+ struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- rgb->dpms = mode;
+ drm_panel_disable(panel->panel);
}
static bool
@@ -115,16 +112,6 @@ atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void atmel_hlcdc_panel_encoder_prepare(struct drm_encoder *encoder)
-{
- atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void atmel_hlcdc_panel_encoder_commit(struct drm_encoder *encoder)
-{
- atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
static void
atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -156,11 +143,10 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
}
static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
- .dpms = atmel_hlcdc_panel_encoder_dpms,
.mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
- .prepare = atmel_hlcdc_panel_encoder_prepare,
- .commit = atmel_hlcdc_panel_encoder_commit,
.mode_set = atmel_hlcdc_rgb_encoder_mode_set,
+ .disable = atmel_hlcdc_panel_encoder_disable,
+ .enable = atmel_hlcdc_panel_encoder_enable,
};
static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
@@ -226,10 +212,13 @@ atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = atmel_hlcdc_panel_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = atmel_hlcdc_panel_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index c5892dcfd745..be9fa8220499 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -19,6 +19,59 @@
#include "atmel_hlcdc_dc.h"
+/**
+ * Atmel HLCDC Plane state structure.
+ *
+ * @base: DRM plane state
+ * @crtc_x: x position of the plane relative to the CRTC
+ * @crtc_y: y position of the plane relative to the CRTC
+ * @crtc_w: visible width of the plane
+ * @crtc_h: visible height of the plane
+ * @src_x: x buffer position
+ * @src_y: y buffer position
+ * @src_w: buffer width
+ * @src_h: buffer height
+ * @alpha: alpha blending of the plane
+ * @bpp: bytes per pixel deduced from pixel_format
+ * @offsets: offsets to apply to the GEM buffers
+ * @xstride: value to add to the pixel pointer between each line
+ * @pstride: value to add to the pixel pointer between each pixel
+ * @nplanes: number of planes (deduced from pixel_format)
+ */
+struct atmel_hlcdc_plane_state {
+ struct drm_plane_state base;
+ int crtc_x;
+ int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+ uint32_t src_x;
+ uint32_t src_y;
+ uint32_t src_w;
+ uint32_t src_h;
+
+ u8 alpha;
+
+ bool disc_updated;
+
+ int disc_x;
+ int disc_y;
+ int disc_w;
+ int disc_h;
+
+ /* These fields are private and should not be touched */
+ int bpp[ATMEL_HLCDC_MAX_PLANES];
+ unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
+ int xstride[ATMEL_HLCDC_MAX_PLANES];
+ int pstride[ATMEL_HLCDC_MAX_PLANES];
+ int nplanes;
+};
+
+static inline struct atmel_hlcdc_plane_state *
+drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s)
+{
+ return container_of(s, struct atmel_hlcdc_plane_state, base);
+}
+
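The helper just added is the standard DRM trick for subclassing atomic state: the base drm_plane_state is embedded in the driver structure and container_of() recovers the wrapper from a base pointer. A generic illustration of the pattern, with purely illustrative names:

struct example_plane_state {
	struct drm_plane_state base;	/* embedded base state */
	int private_field;		/* driver-specific data */
};

static inline struct example_plane_state *
to_example_plane_state(struct drm_plane_state *s)
{
	/* container_of() subtracts offsetof(..., base) from s */
	return container_of(s, struct example_plane_state, base);
}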
#define SUBPIXEL_MASK 0xffff
static uint32_t rgb_formats[] = {
@@ -128,7 +181,7 @@ static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
return 0;
}
-static bool atmel_hlcdc_format_embedds_alpha(u32 format)
+static bool atmel_hlcdc_format_embeds_alpha(u32 format)
{
int i;
@@ -204,7 +257,7 @@ static u32 heo_upscaling_ycoef[] = {
static void
atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_cfg_layout *layout =
&plane->layer.desc->layout;
@@ -213,69 +266,69 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->size,
0xffffffff,
- (req->crtc_w - 1) |
- ((req->crtc_h - 1) << 16));
+ (state->crtc_w - 1) |
+ ((state->crtc_h - 1) << 16));
if (layout->memsize)
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->memsize,
0xffffffff,
- (req->src_w - 1) |
- ((req->src_h - 1) << 16));
+ (state->src_w - 1) |
+ ((state->src_h - 1) << 16));
if (layout->pos)
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->pos,
0xffffffff,
- req->crtc_x |
- (req->crtc_y << 16));
+ state->crtc_x |
+ (state->crtc_y << 16));
/* TODO: rework the rescaling part */
- if (req->crtc_w != req->src_w || req->crtc_h != req->src_h) {
+ if (state->crtc_w != state->src_w || state->crtc_h != state->src_h) {
u32 factor_reg = 0;
- if (req->crtc_w != req->src_w) {
+ if (state->crtc_w != state->src_w) {
int i;
u32 factor;
u32 *coeff_tab = heo_upscaling_xcoef;
u32 max_memsize;
- if (req->crtc_w < req->src_w)
+ if (state->crtc_w < state->src_w)
coeff_tab = heo_downscaling_xcoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_xcoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
17 + i,
0xffffffff,
coeff_tab[i]);
- factor = ((8 * 256 * req->src_w) - (256 * 4)) /
- req->crtc_w;
+ factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+ state->crtc_w;
factor++;
- max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
+ max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
2048;
- if (max_memsize > req->src_w)
+ if (max_memsize > state->src_w)
factor--;
factor_reg |= factor | 0x80000000;
}
- if (req->crtc_h != req->src_h) {
+ if (state->crtc_h != state->src_h) {
int i;
u32 factor;
u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize;
- if (req->crtc_w < req->src_w)
+ if (state->crtc_w < state->src_w)
coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i,
0xffffffff,
coeff_tab[i]);
- factor = ((8 * 256 * req->src_w) - (256 * 4)) /
- req->crtc_w;
+ factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+ state->crtc_w;
factor++;
- max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
+ max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
2048;
- if (max_memsize > req->src_w)
+ if (max_memsize > state->src_w)
factor--;
factor_reg |= (factor << 16) | 0x80000000;
}
@@ -287,7 +340,7 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
static void
atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_cfg_layout *layout =
&plane->layer.desc->layout;
@@ -297,10 +350,11 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER;
- if (atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format))
+ if (atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format))
cfg |= ATMEL_HLCDC_LAYER_LAEN;
else
- cfg |= ATMEL_HLCDC_LAYER_GAEN;
+ cfg |= ATMEL_HLCDC_LAYER_GAEN |
+ ATMEL_HLCDC_LAYER_GA(state->alpha);
}
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -312,24 +366,26 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER |
ATMEL_HLCDC_LAYER_GAEN |
+ ATMEL_HLCDC_LAYER_GA_MASK |
ATMEL_HLCDC_LAYER_LAEN |
ATMEL_HLCDC_LAYER_OVR |
ATMEL_HLCDC_LAYER_DMA, cfg);
}
static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
u32 cfg;
int ret;
- ret = atmel_hlcdc_format_to_plane_mode(req->fb->pixel_format, &cfg);
+ ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->pixel_format,
+ &cfg);
if (ret)
return;
- if ((req->fb->pixel_format == DRM_FORMAT_YUV422 ||
- req->fb->pixel_format == DRM_FORMAT_NV61) &&
- (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
+ if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
+ state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
+ (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -341,7 +397,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
* Rotation optimization is not working on RGB888 (rotation is still
* working but without any optimization).
*/
- if (req->fb->pixel_format == DRM_FORMAT_RGB888)
+ if (state->base.fb->pixel_format == DRM_FORMAT_RGB888)
cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS;
else
cfg = 0;
@@ -352,73 +408,142 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
}
static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
struct atmel_hlcdc_layer *layer = &plane->layer;
const struct atmel_hlcdc_layer_cfg_layout *layout =
&layer->desc->layout;
int i;
- atmel_hlcdc_layer_update_set_fb(&plane->layer, req->fb, req->offsets);
+ atmel_hlcdc_layer_update_set_fb(&plane->layer, state->base.fb,
+ state->offsets);
- for (i = 0; i < req->nplanes; i++) {
+ for (i = 0; i < state->nplanes; i++) {
if (layout->xstride[i]) {
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->xstride[i],
0xffffffff,
- req->xstride[i]);
+ state->xstride[i]);
}
if (layout->pstride[i]) {
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->pstride[i],
0xffffffff,
- req->pstride[i]);
+ state->pstride[i]);
}
}
}
-static int atmel_hlcdc_plane_check_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req,
- const struct drm_display_mode *mode)
+int
+atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- const struct atmel_hlcdc_layer_cfg_layout *layout =
- &plane->layer.desc->layout;
+ int disc_x = 0, disc_y = 0, disc_w = 0, disc_h = 0;
+ const struct atmel_hlcdc_layer_cfg_layout *layout;
+ struct atmel_hlcdc_plane_state *primary_state;
+ struct drm_plane_state *primary_s;
+ struct atmel_hlcdc_plane *primary;
+ struct drm_plane *ovl;
+
+ primary = drm_plane_to_atmel_hlcdc_plane(c_state->crtc->primary);
+ layout = &primary->layer.desc->layout;
+ if (!layout->disc_pos || !layout->disc_size)
+ return 0;
+
+ primary_s = drm_atomic_get_plane_state(c_state->state,
+ &primary->base);
+ if (IS_ERR(primary_s))
+ return PTR_ERR(primary_s);
+
+ primary_state = drm_plane_state_to_atmel_hlcdc_plane_state(primary_s);
+
+ drm_atomic_crtc_state_for_each_plane(ovl, c_state) {
+ struct atmel_hlcdc_plane_state *ovl_state;
+ struct drm_plane_state *ovl_s;
+
+ if (ovl == c_state->crtc->primary)
+ continue;
- if (!layout->size &&
- (mode->hdisplay != req->crtc_w ||
- mode->vdisplay != req->crtc_h))
- return -EINVAL;
+ ovl_s = drm_atomic_get_plane_state(c_state->state, ovl);
+ if (IS_ERR(ovl_s))
+ return PTR_ERR(ovl_s);
- if (plane->layer.desc->max_height &&
- req->crtc_h > plane->layer.desc->max_height)
- return -EINVAL;
+ ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s);
- if (plane->layer.desc->max_width &&
- req->crtc_w > plane->layer.desc->max_width)
- return -EINVAL;
+ if (!ovl_s->fb ||
+ atmel_hlcdc_format_embeds_alpha(ovl_s->fb->pixel_format) ||
+ ovl_state->alpha != 255)
+ continue;
- if ((req->crtc_h != req->src_h || req->crtc_w != req->src_w) &&
- (!layout->memsize ||
- atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format)))
- return -EINVAL;
+ /* TODO: implement a smarter hidden area detection */
+ if (ovl_state->crtc_h * ovl_state->crtc_w < disc_h * disc_w)
+ continue;
- if (req->crtc_x < 0 || req->crtc_y < 0)
- return -EINVAL;
+ disc_x = ovl_state->crtc_x;
+ disc_y = ovl_state->crtc_y;
+ disc_h = ovl_state->crtc_h;
+ disc_w = ovl_state->crtc_w;
+ }
- if (req->crtc_w + req->crtc_x > mode->hdisplay ||
- req->crtc_h + req->crtc_y > mode->vdisplay)
- return -EINVAL;
+ if (disc_x == primary_state->disc_x &&
+ disc_y == primary_state->disc_y &&
+ disc_w == primary_state->disc_w &&
+ disc_h == primary_state->disc_h)
+ return 0;
+
+
+ primary_state->disc_x = disc_x;
+ primary_state->disc_y = disc_y;
+ primary_state->disc_w = disc_w;
+ primary_state->disc_h = disc_h;
+ primary_state->disc_updated = true;
return 0;
}
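prepare_disc_area() picks, among the overlays, the largest rectangle that is guaranteed fully opaque (no per-pixel alpha in the format and global alpha at 255) and records it as the primary plane's discard window, so the controller can skip fetching pixels that will be painted over anyway. A worked example with illustrative numbers:

/*
 * Overlay A: 320x240 at (0, 0), XRGB8888, alpha = 255
 *	-> opaque candidate covering 76800 pixels
 * Overlay B: 640x480 at (0, 0), ARGB8888 (format embeds alpha)
 *	-> skipped, it may be translucent
 * Result: the primary layer's discard area becomes the 320x240
 * rectangle at (0, 0), and disc_updated is set only if that
 * rectangle differs from the one in the previous state.
 */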
-int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req,
- const struct drm_display_mode *mode)
+static void
+atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_state *state)
+{
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &plane->layer.desc->layout;
+ int disc_surface = 0;
+
+ if (!state->disc_updated)
+ return;
+
+ disc_surface = state->disc_h * state->disc_w;
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config,
+ ATMEL_HLCDC_LAYER_DISCEN,
+ disc_surface ? ATMEL_HLCDC_LAYER_DISCEN : 0);
+
+ if (!disc_surface)
+ return;
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->disc_pos,
+ 0xffffffff,
+ state->disc_x | (state->disc_y << 16));
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->disc_size,
+ 0xffffffff,
+ (state->disc_w - 1) |
+ ((state->disc_h - 1) << 16));
+}
+
+static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
+ struct drm_plane_state *s)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &plane->layer.desc->layout;
+ struct drm_framebuffer *fb = state->base.fb;
+ const struct drm_display_mode *mode;
+ struct drm_crtc_state *crtc_state;
unsigned int patched_crtc_w;
unsigned int patched_crtc_h;
unsigned int patched_src_w;
@@ -430,196 +555,196 @@ int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
int vsub = 1;
int i;
- if ((req->src_x | req->src_y | req->src_w | req->src_h) &
+ if (!state->base.crtc || !fb)
+ return 0;
+
+ crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
+ mode = &crtc_state->adjusted_mode;
+
+ state->src_x = s->src_x;
+ state->src_y = s->src_y;
+ state->src_h = s->src_h;
+ state->src_w = s->src_w;
+ state->crtc_x = s->crtc_x;
+ state->crtc_y = s->crtc_y;
+ state->crtc_h = s->crtc_h;
+ state->crtc_w = s->crtc_w;
+ if ((state->src_x | state->src_y | state->src_w | state->src_h) &
SUBPIXEL_MASK)
return -EINVAL;
- req->src_x >>= 16;
- req->src_y >>= 16;
- req->src_w >>= 16;
- req->src_h >>= 16;
+ state->src_x >>= 16;
+ state->src_y >>= 16;
+ state->src_w >>= 16;
+ state->src_h >>= 16;
- req->nplanes = drm_format_num_planes(req->fb->pixel_format);
- if (req->nplanes > ATMEL_HLCDC_MAX_PLANES)
+ state->nplanes = drm_format_num_planes(fb->pixel_format);
+ if (state->nplanes > ATMEL_HLCDC_MAX_PLANES)
return -EINVAL;
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
- tmp = req->crtc_w;
- req->crtc_w = req->crtc_h;
- req->crtc_h = tmp;
- tmp = req->src_w;
- req->src_w = req->src_h;
- req->src_h = tmp;
+ if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+ tmp = state->crtc_w;
+ state->crtc_w = state->crtc_h;
+ state->crtc_h = tmp;
+ tmp = state->src_w;
+ state->src_w = state->src_h;
+ state->src_h = tmp;
}
- if (req->crtc_x + req->crtc_w > mode->hdisplay)
- patched_crtc_w = mode->hdisplay - req->crtc_x;
+ if (state->crtc_x + state->crtc_w > mode->hdisplay)
+ patched_crtc_w = mode->hdisplay - state->crtc_x;
else
- patched_crtc_w = req->crtc_w;
+ patched_crtc_w = state->crtc_w;
- if (req->crtc_x < 0) {
- patched_crtc_w += req->crtc_x;
- x_offset = -req->crtc_x;
- req->crtc_x = 0;
+ if (state->crtc_x < 0) {
+ patched_crtc_w += state->crtc_x;
+ x_offset = -state->crtc_x;
+ state->crtc_x = 0;
}
- if (req->crtc_y + req->crtc_h > mode->vdisplay)
- patched_crtc_h = mode->vdisplay - req->crtc_y;
+ if (state->crtc_y + state->crtc_h > mode->vdisplay)
+ patched_crtc_h = mode->vdisplay - state->crtc_y;
else
- patched_crtc_h = req->crtc_h;
+ patched_crtc_h = state->crtc_h;
- if (req->crtc_y < 0) {
- patched_crtc_h += req->crtc_y;
- y_offset = -req->crtc_y;
- req->crtc_y = 0;
+ if (state->crtc_y < 0) {
+ patched_crtc_h += state->crtc_y;
+ y_offset = -state->crtc_y;
+ state->crtc_y = 0;
}
- patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * req->src_w,
- req->crtc_w);
- patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * req->src_h,
- req->crtc_h);
+ patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * state->src_w,
+ state->crtc_w);
+ patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * state->src_h,
+ state->crtc_h);
- hsub = drm_format_horz_chroma_subsampling(req->fb->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(req->fb->pixel_format);
+ hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
- for (i = 0; i < req->nplanes; i++) {
+ for (i = 0; i < state->nplanes; i++) {
unsigned int offset = 0;
int xdiv = i ? hsub : 1;
int ydiv = i ? vsub : 1;
- req->bpp[i] = drm_format_plane_cpp(req->fb->pixel_format, i);
- if (!req->bpp[i])
+ state->bpp[i] = drm_format_plane_cpp(fb->pixel_format, i);
+ if (!state->bpp[i])
return -EINVAL;
- switch (plane->rotation & 0xf) {
+ switch (state->base.rotation & 0xf) {
case BIT(DRM_ROTATE_90):
- offset = ((y_offset + req->src_y + patched_src_w - 1) /
- ydiv) * req->fb->pitches[i];
- offset += ((x_offset + req->src_x) / xdiv) *
- req->bpp[i];
- req->xstride[i] = ((patched_src_w - 1) / ydiv) *
- req->fb->pitches[i];
- req->pstride[i] = -req->fb->pitches[i] - req->bpp[i];
+ offset = ((y_offset + state->src_y + patched_src_w - 1) /
+ ydiv) * fb->pitches[i];
+ offset += ((x_offset + state->src_x) / xdiv) *
+ state->bpp[i];
+ state->xstride[i] = ((patched_src_w - 1) / ydiv) *
+ fb->pitches[i];
+ state->pstride[i] = -fb->pitches[i] - state->bpp[i];
break;
case BIT(DRM_ROTATE_180):
- offset = ((y_offset + req->src_y + patched_src_h - 1) /
- ydiv) * req->fb->pitches[i];
- offset += ((x_offset + req->src_x + patched_src_w - 1) /
- xdiv) * req->bpp[i];
- req->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
- req->bpp[i]) - req->fb->pitches[i];
- req->pstride[i] = -2 * req->bpp[i];
+ offset = ((y_offset + state->src_y + patched_src_h - 1) /
+ ydiv) * fb->pitches[i];
+ offset += ((x_offset + state->src_x + patched_src_w - 1) /
+ xdiv) * state->bpp[i];
+ state->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
+ state->bpp[i]) - fb->pitches[i];
+ state->pstride[i] = -2 * state->bpp[i];
break;
case BIT(DRM_ROTATE_270):
- offset = ((y_offset + req->src_y) / ydiv) *
- req->fb->pitches[i];
- offset += ((x_offset + req->src_x + patched_src_h - 1) /
- xdiv) * req->bpp[i];
- req->xstride[i] = -(((patched_src_w - 1) / ydiv) *
- req->fb->pitches[i]) -
- (2 * req->bpp[i]);
- req->pstride[i] = req->fb->pitches[i] - req->bpp[i];
+ offset = ((y_offset + state->src_y) / ydiv) *
+ fb->pitches[i];
+ offset += ((x_offset + state->src_x + patched_src_h - 1) /
+ xdiv) * state->bpp[i];
+ state->xstride[i] = -(((patched_src_w - 1) / ydiv) *
+ fb->pitches[i]) -
+ (2 * state->bpp[i]);
+ state->pstride[i] = fb->pitches[i] - state->bpp[i];
break;
case BIT(DRM_ROTATE_0):
default:
- offset = ((y_offset + req->src_y) / ydiv) *
- req->fb->pitches[i];
- offset += ((x_offset + req->src_x) / xdiv) *
- req->bpp[i];
- req->xstride[i] = req->fb->pitches[i] -
+ offset = ((y_offset + state->src_y) / ydiv) *
+ fb->pitches[i];
+ offset += ((x_offset + state->src_x) / xdiv) *
+ state->bpp[i];
+ state->xstride[i] = fb->pitches[i] -
((patched_src_w / xdiv) *
- req->bpp[i]);
- req->pstride[i] = 0;
+ state->bpp[i]);
+ state->pstride[i] = 0;
break;
}
- req->offsets[i] = offset + req->fb->offsets[i];
+ state->offsets[i] = offset + fb->offsets[i];
}
- req->src_w = patched_src_w;
- req->src_h = patched_src_h;
- req->crtc_w = patched_crtc_w;
- req->crtc_h = patched_crtc_h;
+ state->src_w = patched_src_w;
+ state->src_h = patched_src_h;
+ state->crtc_w = patched_crtc_w;
+ state->crtc_h = patched_crtc_h;
- return atmel_hlcdc_plane_check_update_req(p, req, mode);
-}
+ if (!layout->size &&
+ (mode->hdisplay != state->crtc_w ||
+ mode->vdisplay != state->crtc_h))
+ return -EINVAL;
-int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req)
-{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- int ret;
+ if (plane->layer.desc->max_height &&
+ state->crtc_h > plane->layer.desc->max_height)
+ return -EINVAL;
- ret = atmel_hlcdc_layer_update_start(&plane->layer);
- if (ret)
- return ret;
+ if (plane->layer.desc->max_width &&
+ state->crtc_w > plane->layer.desc->max_width)
+ return -EINVAL;
- atmel_hlcdc_plane_update_pos_and_size(plane, req);
- atmel_hlcdc_plane_update_general_settings(plane, req);
- atmel_hlcdc_plane_update_format(plane, req);
- atmel_hlcdc_plane_update_buffers(plane, req);
+ if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
+ (!layout->memsize ||
+ atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format)))
+ return -EINVAL;
- atmel_hlcdc_layer_update_commit(&plane->layer);
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_w + state->crtc_x > mode->hdisplay ||
+ state->crtc_h + state->crtc_y > mode->vdisplay)
+ return -EINVAL;
return 0;
}
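The rotation cases above encode the scan order purely through the start offset and the per-line/per-pixel strides. A worked example for the DRM_ROTATE_90 branch, assuming the DMA engine advances by bpp for every fetched pixel on top of the programmed pstride (illustrative numbers, single RGB plane):

/*
 * XRGB8888 -> bpp = 4, fb->pitches[0] = 4096, src_x = src_y = 0,
 * x_offset = y_offset = 0, patched_src_w = 100, hsub = vsub = 1:
 *
 *	offset  = (0 + 0 + 100 - 1) * 4096 = 405504  (bottom of column)
 *	xstride = (100 - 1) * 4096         = 405504
 *	pstride = -4096 - 4                = -4100
 *
 * Net step per pixel is pstride + bpp = -4096, i.e. exactly one
 * framebuffer row up: the column is read bottom-to-top, which is a
 * 90-degree rotation of the scanout order.
 */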
-int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- const struct drm_display_mode *mode)
+static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- struct atmel_hlcdc_plane_update_req req;
- int ret = 0;
-
- memset(&req, 0, sizeof(req));
- req.crtc_x = crtc_x;
- req.crtc_y = crtc_y;
- req.crtc_w = crtc_w;
- req.crtc_h = crtc_h;
- req.src_x = src_x;
- req.src_y = src_y;
- req.src_w = src_w;
- req.src_h = src_h;
- req.fb = fb;
-
- ret = atmel_hlcdc_plane_prepare_update_req(&plane->base, &req, mode);
- if (ret)
- return ret;
- if (!req.crtc_h || !req.crtc_w)
- return atmel_hlcdc_layer_disable(&plane->layer);
-
- return atmel_hlcdc_plane_apply_update_req(&plane->base, &req);
+ return atmel_hlcdc_layer_update_start(&plane->layer);
}
-static int atmel_hlcdc_plane_update(struct drm_plane *p,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
+ struct drm_plane_state *old_s)
{
- return atmel_hlcdc_plane_update_with_mode(p, crtc, fb, crtc_x, crtc_y,
- crtc_w, crtc_h, src_x, src_y,
- src_w, src_h, &crtc->hwmode);
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
+
+ if (!p->state->crtc || !p->state->fb)
+ return;
+
+ atmel_hlcdc_plane_update_pos_and_size(plane, state);
+ atmel_hlcdc_plane_update_general_settings(plane, state);
+ atmel_hlcdc_plane_update_format(plane, state);
+ atmel_hlcdc_plane_update_buffers(plane, state);
+ atmel_hlcdc_plane_update_disc_area(plane, state);
+
+ atmel_hlcdc_layer_update_commit(&plane->layer);
}
-static int atmel_hlcdc_plane_disable(struct drm_plane *p)
+static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
+ struct drm_plane_state *old_state)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- return atmel_hlcdc_layer_disable(&plane->layer);
+ atmel_hlcdc_layer_disable(&plane->layer);
}
static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
@@ -635,38 +760,36 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
devm_kfree(p->dev->dev, plane);
}
-static int atmel_hlcdc_plane_set_alpha(struct atmel_hlcdc_plane *plane,
- u8 alpha)
+static int atmel_hlcdc_plane_atomic_set_property(struct drm_plane *p,
+ struct drm_plane_state *s,
+ struct drm_property *property,
+ uint64_t val)
{
- atmel_hlcdc_layer_update_start(&plane->layer);
- atmel_hlcdc_layer_update_cfg(&plane->layer,
- plane->layer.desc->layout.general_config,
- ATMEL_HLCDC_LAYER_GA_MASK,
- alpha << ATMEL_HLCDC_LAYER_GA_SHIFT);
- atmel_hlcdc_layer_update_commit(&plane->layer);
-
- return 0;
-}
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_properties *props = plane->properties;
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
-static int atmel_hlcdc_plane_set_rotation(struct atmel_hlcdc_plane *plane,
- unsigned int rotation)
-{
- plane->rotation = rotation;
+ if (property == props->alpha)
+ state->alpha = val;
+ else
+ return -EINVAL;
return 0;
}
-static int atmel_hlcdc_plane_set_property(struct drm_plane *p,
- struct drm_property *property,
- uint64_t value)
+static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
+ const struct drm_plane_state *s,
+ struct drm_property *property,
+ uint64_t *val)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_properties *props = plane->properties;
+ const struct atmel_hlcdc_plane_state *state =
+ container_of(s, const struct atmel_hlcdc_plane_state, base);
if (property == props->alpha)
- atmel_hlcdc_plane_set_alpha(plane, value);
- else if (property == props->rotation)
- atmel_hlcdc_plane_set_rotation(plane, value);
+ *val = state->alpha;
else
return -EINVAL;
@@ -694,8 +817,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
if (desc->layout.xstride && desc->layout.pstride)
drm_object_attach_property(&plane->base.base,
- props->rotation,
- BIT(DRM_ROTATE_0));
+ plane->base.dev->mode_config.rotation_property,
+ BIT(DRM_ROTATE_0));
if (desc->layout.csc) {
/*
@@ -717,11 +840,76 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
}
}
+static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
+ .prepare_fb = atmel_hlcdc_plane_prepare_fb,
+ .atomic_check = atmel_hlcdc_plane_atomic_check,
+ .atomic_update = atmel_hlcdc_plane_atomic_update,
+ .atomic_disable = atmel_hlcdc_plane_atomic_disable,
+};
+
+static void atmel_hlcdc_plane_reset(struct drm_plane *p)
+{
+ struct atmel_hlcdc_plane_state *state;
+
+ if (p->state) {
+ state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
+
+ if (state->base.fb)
+ drm_framebuffer_unreference(state->base.fb);
+
+ kfree(state);
+ p->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->alpha = 255;
+ p->state = &state->base;
+ p->state->plane = p;
+ }
+}
+
+static struct drm_plane_state *
+atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
+{
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
+ struct atmel_hlcdc_plane_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ copy->disc_updated = false;
+
+ if (copy->base.fb)
+ drm_framebuffer_reference(copy->base.fb);
+
+ return &copy->base;
+}
+
+static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *s)
+{
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
+
+ if (s->fb)
+ drm_framebuffer_unreference(s->fb);
+
+ kfree(state);
+}
+
static struct drm_plane_funcs layer_plane_funcs = {
- .update_plane = atmel_hlcdc_plane_update,
- .disable_plane = atmel_hlcdc_plane_disable,
- .set_property = atmel_hlcdc_plane_set_property,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = atmel_hlcdc_plane_destroy,
+ .reset = atmel_hlcdc_plane_reset,
+ .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
+ .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
+ .atomic_set_property = atmel_hlcdc_plane_atomic_set_property,
+ .atomic_get_property = atmel_hlcdc_plane_atomic_get_property,
};
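With this table the legacy ioctl entry points are serviced by the atomic helpers, which translate each call into a one-shot atomic transaction. A condensed sketch of what drm_atomic_helper_update_plane() does internally — a simplification of the 4.0-era helper, not its literal source, with error and backoff handling omitted:

	state = drm_atomic_state_alloc(plane->dev);
	plane_state = drm_atomic_get_plane_state(state, plane);
	drm_atomic_set_crtc_for_plane(plane_state, crtc);
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = crtc_x;	/* ...and the other coords... */
	ret = drm_atomic_commit(state);
	if (ret)
		drm_atomic_state_free(state);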
static struct atmel_hlcdc_plane *
@@ -755,6 +943,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
+ drm_plane_helper_add(&plane->base,
+ &atmel_hlcdc_layer_plane_helper_funcs);
+
/* Set default property values*/
atmel_hlcdc_plane_init_properties(plane, desc, props);
@@ -774,12 +965,13 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
if (!props->alpha)
return ERR_PTR(-ENOMEM);
- props->rotation = drm_mode_create_rotation_property(dev,
- BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_90) |
- BIT(DRM_ROTATE_180) |
- BIT(DRM_ROTATE_270));
- if (!props->rotation)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270));
+ if (!dev->mode_config.rotation_property)
return ERR_PTR(-ENOMEM);
return props;
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index 460389702d31..a39b0343c197 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -164,6 +164,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index f38bbcdf929b..acef3223772c 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -11,3 +11,14 @@ config DRM_PTN3460
select DRM_PANEL
---help---
ptn3460 eDP-LVDS bridge chip driver.
+
+config DRM_PS8622
+ tristate "Parade eDP/LVDS bridge"
+ depends on DRM
+ depends on OF
+ select DRM_PANEL
+ select DRM_KMS_HELPER
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+	  Parade eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d8a8cfd12fbb..8dfebd984370 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Iinclude/drm
+obj-$(CONFIG_DRM_PS8622) += ps8622.o
obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index cd6a70647e32..49cafb61d290 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/hdmi.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <drm/drm_of.h>
@@ -126,6 +127,7 @@ struct dw_hdmi {
struct i2c_adapter *ddc;
void __iomem *regs;
+ struct mutex audio_mutex;
unsigned int sample_rate;
int ratio;
@@ -177,26 +179,23 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
hdmi_modb(hdmi, data << shift, mask, reg);
}
-static void hdmi_set_clock_regenerator_n(struct dw_hdmi *hdmi,
- unsigned int value)
+static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
+ unsigned int n)
{
- hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1);
- hdmi_writeb(hdmi, (value >> 8) & 0xff, HDMI_AUD_N2);
- hdmi_writeb(hdmi, (value >> 16) & 0x0f, HDMI_AUD_N3);
+ /* Must be set/cleared first */
+ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
/* nshift factor = 0 */
hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3);
-}
-
-static void hdmi_regenerate_cts(struct dw_hdmi *hdmi, unsigned int cts)
-{
- /* Must be set/cleared first */
- hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
- hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
- hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
+ hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
+ hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
+
+ hdmi_writeb(hdmi, (n >> 16) & 0x0f, HDMI_AUD_N3);
+ hdmi_writeb(hdmi, (n >> 8) & 0xff, HDMI_AUD_N2);
+ hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1);
}
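Beyond merging the two writers, the rewritten helper fixes the programming order: CTS_MANUAL is cleared first, the CTS registers are written high byte to low, and the N registers go last. The values themselves follow the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS. A worked example for the common 48 kHz / 74.25 MHz case:

/*
 * The recommended N for 48 kHz audio is 6144, so
 *	CTS = f_TMDS * N / (128 * fs)
 *	    = 74250000 * 6144 / (128 * 48000)
 *	    = 74250
 * and hdmi_set_cts_n(hdmi, 74250, 6144) splits 74250 across
 * HDMI_AUD_CTS1..3 and 6144 across HDMI_AUD_N1..3.
 */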
static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
@@ -355,18 +354,21 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
__func__, hdmi->sample_rate, hdmi->ratio,
pixel_clk, clk_n, clk_cts);
- hdmi_set_clock_regenerator_n(hdmi, clk_n);
- hdmi_regenerate_cts(hdmi, clk_cts);
+ hdmi_set_cts_n(hdmi, clk_cts, clk_n);
}
static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
{
+ mutex_lock(&hdmi->audio_mutex);
hdmi_set_clk_regenerator(hdmi, 74250000);
+ mutex_unlock(&hdmi->audio_mutex);
}
static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
{
+ mutex_lock(&hdmi->audio_mutex);
hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
+ mutex_unlock(&hdmi->audio_mutex);
}
/*
@@ -753,10 +755,10 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
{
unsigned res_idx, i;
u8 val, msec;
- const struct dw_hdmi_mpll_config *mpll_config =
- hdmi->plat_data->mpll_cfg;
- const struct dw_hdmi_curr_ctrl *curr_ctrl = hdmi->plat_data->cur_ctr;
- const struct dw_hdmi_sym_term *sym_term = hdmi->plat_data->sym_term;
+ const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
+ const struct dw_hdmi_mpll_config *mpll_config = plat_data->mpll_cfg;
+ const struct dw_hdmi_curr_ctrl *curr_ctrl = plat_data->cur_ctr;
+ const struct dw_hdmi_phy_config *phy_config = plat_data->phy_config;
if (prep)
return -EINVAL;
@@ -827,18 +829,18 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
- for (i = 0; sym_term[i].mpixelclock != (~0UL); i++)
+ for (i = 0; phy_config[i].mpixelclock != (~0UL); i++)
if (hdmi->hdmi_data.video_mode.mpixelclock <=
- sym_term[i].mpixelclock)
+ phy_config[i].mpixelclock)
break;
/* RESISTANCE TERM 133Ohm Cfg */
- hdmi_phy_i2c_write(hdmi, sym_term[i].term, 0x19); /* TXTERM */
+ hdmi_phy_i2c_write(hdmi, phy_config[i].term, 0x19); /* TXTERM */
/* PREEMP Cgf 0.00 */
- hdmi_phy_i2c_write(hdmi, sym_term[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
-
+ hdmi_phy_i2c_write(hdmi, phy_config[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
/* TX/CK LVL 10 */
- hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
+ hdmi_phy_i2c_write(hdmi, phy_config[i].vlev_ctr, 0x0E); /* VLEVCTRL */
+
/* REMOVE CLK TERM */
hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
@@ -1569,6 +1571,8 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->ratio = 100;
hdmi->encoder = encoder;
+ mutex_init(&hdmi->audio_mutex);
+
of_property_read_u32(np, "reg-io-width", &val);
switch (val) {
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/ps8622.c
new file mode 100644
index 000000000000..e895aa7ea353
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ps8622.c
@@ -0,0 +1,684 @@
+/*
+ * Parade PS8622 eDP/LVDS bridge driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_panel.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/* Brightness scale on the Parade chip */
+#define PS8622_MAX_BRIGHTNESS 0xff
+
+/* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */
+#define PS8622_POWER_RISE_T1_MIN_US 10
+#define PS8622_POWER_RISE_T1_MAX_US 10000
+#define PS8622_RST_HIGH_T2_MIN_US 3000
+#define PS8622_RST_HIGH_T2_MAX_US 30000
+#define PS8622_PWMO_END_T12_MS 200
+#define PS8622_POWER_FALL_T16_MAX_US 10000
+#define PS8622_POWER_OFF_T17_MS 500
+
+#if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \
+ (PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US))
+#error "T2.min + T1.max must not exceed T2.max + T1.min"
+#endif
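This preprocessor check guards the usleep_range() interval used later in ps8622_pre_enable(), which sleeps between T2.min + T1.max and T2.max + T1.min. With the constants above:

/*
 *	min = T2.min + T1.max = 3000 + 10000 = 13000 us
 *	max = T2.max + T1.min = 30000 + 10   = 30010 us
 *
 * min <= max, so the interval is well-formed and the #error never
 * fires for these datasheet values.
 */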
+
+struct ps8622_bridge {
+ struct drm_connector connector;
+ struct i2c_client *client;
+ struct drm_bridge bridge;
+ struct drm_panel *panel;
+ struct regulator *v12;
+ struct backlight_device *bl;
+
+ struct gpio_desc *gpio_slp;
+ struct gpio_desc *gpio_rst;
+
+ u32 max_lane_count;
+ u32 lane_count;
+
+ bool enabled;
+};
+
+static inline struct ps8622_bridge *
+ bridge_to_ps8622(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ps8622_bridge, bridge);
+}
+
+static inline struct ps8622_bridge *
+ connector_to_ps8622(struct drm_connector *connector)
+{
+ return container_of(connector, struct ps8622_bridge, connector);
+}
+
+static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val)
+{
+ int ret;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ u8 data[] = {reg, val};
+
+ msg.addr = client->addr + page;
+ msg.flags = 0;
+ msg.len = sizeof(data);
+ msg.buf = data;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ if (ret != 1)
+ pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n",
+ client->addr + page, reg, val, ret);
+ return !(ret == 1);
+}
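The PS8622 exposes its register pages at consecutive I2C slave addresses, so page selection is folded into the address rather than going through a page-select register. A usage example, assuming an illustrative base address of 0x08:

/*
 * Writing register 0xa1 = 0x01 on page 2 issues a 2-byte write to
 * slave address 0x08 + 0x02 = 0x0a:
 *
 *	err = ps8622_set(client, 0x02, 0xa1, 0x01);	/* HPD low */
 *
 * The helper returns 0 on success and 1 on failure, which is why the
 * config sequence below treats any non-zero value as an error and
 * maps it to -EIO.
 */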
+
+static int ps8622_send_config(struct ps8622_bridge *ps8622)
+{
+ struct i2c_client *cl = ps8622->client;
+ int err = 0;
+
+ /* HPD low */
+ err = ps8622_set(cl, 0x02, 0xa1, 0x01);
+ if (err)
+ goto error;
+
+	/* SW setting: [1:0] SW output 1.2V voltage is lowered to 96% */
+ err = ps8622_set(cl, 0x04, 0x14, 0x01);
+ if (err)
+ goto error;
+
+ /* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */
+ err = ps8622_set(cl, 0x04, 0xe3, 0x20);
+ if (err)
+ goto error;
+
+ /* [7] RCO SS enable */
+ err = ps8622_set(cl, 0x04, 0xe2, 0x80);
+ if (err)
+ goto error;
+
+ /* RPHY Setting
+ * [3:2] CDR tune wait cycle before measure for fine tune
+ * b00: 1us b01: 0.5us b10:2us, b11: 4us
+ */
+ err = ps8622_set(cl, 0x04, 0x8a, 0x0c);
+ if (err)
+ goto error;
+
+ /* [3] RFD always on */
+ err = ps8622_set(cl, 0x04, 0x89, 0x08);
+ if (err)
+ goto error;
+
+ /* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. */
+ err = ps8622_set(cl, 0x04, 0x71, 0x2d);
+ if (err)
+ goto error;
+
+ /* 2.7G CDR settings: NOF=40LSB for HBR CDR setting */
+ err = ps8622_set(cl, 0x04, 0x7d, 0x07);
+ if (err)
+ goto error;
+
+ /* [1:0] Fmin=+4bands */
+ err = ps8622_set(cl, 0x04, 0x7b, 0x00);
+ if (err)
+ goto error;
+
+ /* [7:5] DCO_FTRNG=+-40% */
+ err = ps8622_set(cl, 0x04, 0x7a, 0xfd);
+ if (err)
+ goto error;
+
+ /* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */
+ err = ps8622_set(cl, 0x04, 0xc0, 0x12);
+ if (err)
+ goto error;
+
+ /* Gitune=-37% */
+ err = ps8622_set(cl, 0x04, 0xc1, 0x92);
+ if (err)
+ goto error;
+
+ /* Fbstep=100% */
+ err = ps8622_set(cl, 0x04, 0xc2, 0x1c);
+ if (err)
+ goto error;
+
+ /* [7] LOS signal disable */
+ err = ps8622_set(cl, 0x04, 0x32, 0x80);
+ if (err)
+ goto error;
+
+ /* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */
+ err = ps8622_set(cl, 0x04, 0x00, 0xb0);
+ if (err)
+ goto error;
+
+ /* [7:6] Right-bar GPIO output strength is 8mA */
+ err = ps8622_set(cl, 0x04, 0x15, 0x40);
+ if (err)
+ goto error;
+
+ /* EQ Training State Machine Setting, RCO calibration start */
+ err = ps8622_set(cl, 0x04, 0x54, 0x10);
+ if (err)
+ goto error;
+
+	/* Logic, needs more than 10 I2C commands */
+ /* [4:0] MAX_LANE_COUNT set to max supported lanes */
+ err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count);
+ if (err)
+ goto error;
+
+ /* [4:0] LANE_COUNT_SET set to chosen lane count */
+ err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count);
+ if (err)
+ goto error;
+
+ err = ps8622_set(cl, 0x00, 0x52, 0x20);
+ if (err)
+ goto error;
+
+ /* HPD CP toggle enable */
+ err = ps8622_set(cl, 0x00, 0xf1, 0x03);
+ if (err)
+ goto error;
+
+ err = ps8622_set(cl, 0x00, 0x62, 0x41);
+ if (err)
+ goto error;
+
+ /* Counter number, add 1ms counter delay */
+ err = ps8622_set(cl, 0x00, 0xf6, 0x01);
+ if (err)
+ goto error;
+
+ /* [6]PWM function control by DPCD0040f[7], default is PWM block */
+ err = ps8622_set(cl, 0x00, 0x77, 0x06);
+ if (err)
+ goto error;
+
+	/* 04h Adjust VTotal tolerance to fix the 30Hz no display issue */
+ err = ps8622_set(cl, 0x00, 0x4c, 0x04);
+ if (err)
+ goto error;
+
+ /* DPCD00400='h00, Parade OUI ='h001cf8 */
+ err = ps8622_set(cl, 0x01, 0xc0, 0x00);
+ if (err)
+ goto error;
+
+ /* DPCD00401='h1c */
+ err = ps8622_set(cl, 0x01, 0xc1, 0x1c);
+ if (err)
+ goto error;
+
+ /* DPCD00402='hf8 */
+ err = ps8622_set(cl, 0x01, 0xc2, 0xf8);
+ if (err)
+ goto error;
+
+ /* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */
+ err = ps8622_set(cl, 0x01, 0xc3, 0x44);
+ if (err)
+ goto error;
+
+ /* DPCD404 */
+ err = ps8622_set(cl, 0x01, 0xc4, 0x32);
+ if (err)
+ goto error;
+
+ /* DPCD405 */
+ err = ps8622_set(cl, 0x01, 0xc5, 0x53);
+ if (err)
+ goto error;
+
+ /* DPCD406 */
+ err = ps8622_set(cl, 0x01, 0xc6, 0x4c);
+ if (err)
+ goto error;
+
+ /* DPCD407 */
+ err = ps8622_set(cl, 0x01, 0xc7, 0x56);
+ if (err)
+ goto error;
+
+ /* DPCD408 */
+ err = ps8622_set(cl, 0x01, 0xc8, 0x35);
+ if (err)
+ goto error;
+
+ /* DPCD40A, Initial Code major revision '01' */
+ err = ps8622_set(cl, 0x01, 0xca, 0x01);
+ if (err)
+ goto error;
+
+ /* DPCD40B, Initial Code minor revision '05' */
+ err = ps8622_set(cl, 0x01, 0xcb, 0x05);
+ if (err)
+ goto error;
+
+
+ if (ps8622->bl) {
+ /* DPCD720, internal PWM */
+ err = ps8622_set(cl, 0x01, 0xa5, 0xa0);
+ if (err)
+ goto error;
+
+ /* FFh for 100% brightness, 0h for 0% brightness */
+ err = ps8622_set(cl, 0x01, 0xa7,
+ ps8622->bl->props.brightness);
+ if (err)
+ goto error;
+ } else {
+ /* DPCD720, external PWM */
+ err = ps8622_set(cl, 0x01, 0xa5, 0x80);
+ if (err)
+ goto error;
+ }
+
+ /* Set LVDS output as 6bit-VESA mapping, single LVDS channel */
+ err = ps8622_set(cl, 0x01, 0xcc, 0x13);
+ if (err)
+ goto error;
+
+ /* Enable SSC set by register */
+ err = ps8622_set(cl, 0x02, 0xb1, 0x20);
+ if (err)
+ goto error;
+
+ /* Set SSC enabled and +/-1% central spreading */
+ err = ps8622_set(cl, 0x04, 0x10, 0x16);
+ if (err)
+ goto error;
+
+ /* Logic end */
+ /* MPU Clock source: LC => RCO */
+ err = ps8622_set(cl, 0x04, 0x59, 0x60);
+ if (err)
+ goto error;
+
+ /* LC -> RCO */
+ err = ps8622_set(cl, 0x04, 0x54, 0x14);
+ if (err)
+ goto error;
+
+ /* HPD high */
+ err = ps8622_set(cl, 0x02, 0xa1, 0x91);
+
+error:
+ return err ? -EIO : 0;
+}
+
+static int ps8622_backlight_update(struct backlight_device *bl)
+{
+ struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev);
+ int ret, brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ if (!ps8622->enabled)
+ return -EINVAL;
+
+ ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness);
+
+ return ret;
+}
+
+static const struct backlight_ops ps8622_backlight_ops = {
+ .update_status = ps8622_backlight_update,
+};
+
+static void ps8622_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+ int ret;
+
+ if (ps8622->enabled)
+ return;
+
+ gpiod_set_value(ps8622->gpio_rst, 0);
+
+ if (ps8622->v12) {
+ ret = regulator_enable(ps8622->v12);
+ if (ret)
+ DRM_ERROR("fails to enable ps8622->v12");
+ }
+
+ if (drm_panel_prepare(ps8622->panel)) {
+ DRM_ERROR("failed to prepare panel\n");
+ return;
+ }
+
+ gpiod_set_value(ps8622->gpio_slp, 1);
+
+ /*
+ * T1 is the range of time that it takes for the power to rise after we
+ * enable the lcd/ps8622 fet. T2 is the range of time in which the
+ * data sheet specifies we should deassert the reset pin.
+ *
+	 * If it takes T1.max for the power to rise, we need to wait at least
+ * T2.min before deasserting the reset pin. If it takes T1.min for the
+ * power to rise, we need to wait at most T2.max before deasserting the
+ * reset pin.
+ */
+ usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US,
+ PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US);
+
+ gpiod_set_value(ps8622->gpio_rst, 1);
+
+ /* wait 20ms after RST high */
+ usleep_range(20000, 30000);
+
+ ret = ps8622_send_config(ps8622);
+ if (ret) {
+ DRM_ERROR("Failed to send config to bridge (%d)\n", ret);
+ return;
+ }
+
+ ps8622->enabled = true;
+}
+
+static void ps8622_enable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (drm_panel_enable(ps8622->panel)) {
+ DRM_ERROR("failed to enable panel\n");
+ return;
+ }
+}
+
+static void ps8622_disable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (drm_panel_disable(ps8622->panel)) {
+ DRM_ERROR("failed to disable panel\n");
+ return;
+ }
+ msleep(PS8622_PWMO_END_T12_MS);
+}
+
+static void ps8622_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (!ps8622->enabled)
+ return;
+
+ ps8622->enabled = false;
+
+ /*
+ * This doesn't matter if the regulators are turned off, but something
+ * else might keep them on. In that case, we want to assert the slp gpio
+ * to lower power.
+ */
+ gpiod_set_value(ps8622->gpio_slp, 0);
+
+ if (drm_panel_unprepare(ps8622->panel)) {
+ DRM_ERROR("failed to unprepare panel\n");
+ return;
+ }
+
+ if (ps8622->v12)
+ regulator_disable(ps8622->v12);
+
+ /*
+	 * Sleep for at least as long as it takes the power rail to fall, so
+	 * that asserting the rst gpio afterwards has no effect.
+ */
+ usleep_range(PS8622_POWER_FALL_T16_MAX_US,
+ 2 * PS8622_POWER_FALL_T16_MAX_US);
+ gpiod_set_value(ps8622->gpio_rst, 0);
+
+ msleep(PS8622_POWER_OFF_T17_MS);
+}
+
+static int ps8622_get_modes(struct drm_connector *connector)
+{
+ struct ps8622_bridge *ps8622;
+
+ ps8622 = connector_to_ps8622(connector);
+
+ return drm_panel_get_modes(ps8622->panel);
+}
+
+static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
+{
+ struct ps8622_bridge *ps8622;
+
+ ps8622 = connector_to_ps8622(connector);
+
+ return ps8622->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
+ .get_modes = ps8622_get_modes,
+ .best_encoder = ps8622_best_encoder,
+};
+
+static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+static void ps8622_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs ps8622_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ps8622_detect,
+ .destroy = ps8622_connector_destroy,
+};
+
+static int ps8622_attach(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(bridge->dev, &ps8622->connector,
+ &ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&ps8622->connector,
+ &ps8622_connector_helper_funcs);
+ drm_connector_register(&ps8622->connector);
+ drm_mode_connector_attach_encoder(&ps8622->connector,
+ bridge->encoder);
+
+ if (ps8622->panel)
+ drm_panel_attach(ps8622->panel, &ps8622->connector);
+
+ drm_helper_hpd_irq_event(ps8622->connector.dev);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8622_bridge_funcs = {
+ .pre_enable = ps8622_pre_enable,
+ .enable = ps8622_enable,
+ .disable = ps8622_disable,
+ .post_disable = ps8622_post_disable,
+ .attach = ps8622_attach,
+};
+
+static const struct of_device_id ps8622_devices[] = {
+ {.compatible = "parade,ps8622",},
+ {.compatible = "parade,ps8625",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ps8622_devices);
+
+static int ps8622_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *endpoint, *panel_node;
+ struct ps8622_bridge *ps8622;
+ int ret;
+
+ ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
+ if (!ps8622)
+ return -ENOMEM;
+
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (panel_node) {
+ ps8622->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!ps8622->panel)
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ps8622->client = client;
+
+ ps8622->v12 = devm_regulator_get(dev, "vdd12");
+ if (IS_ERR(ps8622->v12)) {
+ dev_info(dev, "no 1.2v regulator found for PS8622\n");
+ ps8622->v12 = NULL;
+ }
+
+ ps8622->gpio_slp = devm_gpiod_get(dev, "sleep");
+ if (IS_ERR(ps8622->gpio_slp)) {
+ ret = PTR_ERR(ps8622->gpio_slp);
+ dev_err(dev, "cannot get gpio_slp %d\n", ret);
+ return ret;
+ }
+ ret = gpiod_direction_output(ps8622->gpio_slp, 1);
+ if (ret) {
+ dev_err(dev, "cannot configure gpio_slp\n");
+ return ret;
+ }
+
+ ps8622->gpio_rst = devm_gpiod_get(dev, "reset");
+ if (IS_ERR(ps8622->gpio_rst)) {
+ ret = PTR_ERR(ps8622->gpio_rst);
+ dev_err(dev, "cannot get gpio_rst %d\n", ret);
+ return ret;
+ }
+ /*
+ * Assert the reset pin high to avoid the bridge being
+ * initialized prematurely
+ */
+ ret = gpiod_direction_output(ps8622->gpio_rst, 1);
+ if (ret) {
+ dev_err(dev, "cannot configure gpio_rst\n");
+ return ret;
+ }
+
+ ps8622->max_lane_count = id->driver_data;
+
+ if (of_property_read_u32(dev->of_node, "lane-count",
+ &ps8622->lane_count)) {
+ ps8622->lane_count = ps8622->max_lane_count;
+ } else if (ps8622->lane_count > ps8622->max_lane_count) {
+		dev_info(dev, "lane-count property is too high, "
+			 "using max_lane_count\n");
+ ps8622->lane_count = ps8622->max_lane_count;
+ }
+
+ if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) {
+ ps8622->bl = backlight_device_register("ps8622-backlight",
+ dev, ps8622, &ps8622_backlight_ops,
+ NULL);
+ if (IS_ERR(ps8622->bl)) {
+ DRM_ERROR("failed to register backlight\n");
+ ret = PTR_ERR(ps8622->bl);
+ ps8622->bl = NULL;
+ return ret;
+ }
+ ps8622->bl->props.max_brightness = PS8622_MAX_BRIGHTNESS;
+ ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
+ }
+
+ ps8622->bridge.funcs = &ps8622_bridge_funcs;
+ ps8622->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&ps8622->bridge);
+ if (ret) {
+ DRM_ERROR("Failed to add bridge\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, ps8622);
+
+ return 0;
+}
+
+static int ps8622_remove(struct i2c_client *client)
+{
+ struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
+
+ if (ps8622->bl)
+ backlight_device_unregister(ps8622->bl);
+
+ drm_bridge_remove(&ps8622->bridge);
+
+ return 0;
+}
+
+static const struct i2c_device_id ps8622_i2c_table[] = {
+ /* Device type, max_lane_count */
+ {"ps8622", 1},
+ {"ps8625", 2},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
+
+static struct i2c_driver ps8622_driver = {
+ .id_table = ps8622_i2c_table,
+ .probe = ps8622_probe,
+ .remove = ps8622_remove,
+ .driver = {
+ .name = "ps8622",
+ .owner = THIS_MODULE,
+ .of_match_table = ps8622_devices,
+ },
+};
+module_i2c_driver(ps8622_driver);
+
+MODULE_AUTHOR("Vincent Palatin <vpalatin@chromium.org>");
+MODULE_DESCRIPTION("Parade ps8622/ps8625 eDP-LVDS converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 826833e396f0..9d2f053382e1 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -265,7 +265,7 @@ static struct drm_connector_funcs ptn3460_connector_funcs = {
.destroy = ptn3460_connector_destroy,
};
-int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c2e9c5283136..6e3b78ee7d16 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -92,7 +92,7 @@ drm_atomic_state_alloc(struct drm_device *dev)
state->dev = dev;
- DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
+ DRM_DEBUG_ATOMIC("Allocate atomic state %p\n", state);
return state;
fail:
@@ -122,7 +122,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
struct drm_mode_config *config = &dev->mode_config;
int i;
- DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
+ DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
@@ -134,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
connector->funcs->atomic_destroy_state(connector,
state->connector_states[i]);
+ state->connectors[i] = NULL;
state->connector_states[i] = NULL;
}
@@ -145,6 +146,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
crtc->funcs->atomic_destroy_state(crtc,
state->crtc_states[i]);
+ state->crtcs[i] = NULL;
state->crtc_states[i] = NULL;
}
@@ -156,6 +158,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
plane->funcs->atomic_destroy_state(plane,
state->plane_states[i]);
+ state->planes[i] = NULL;
state->plane_states[i] = NULL;
}
}
@@ -170,9 +173,12 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
*/
void drm_atomic_state_free(struct drm_atomic_state *state)
{
+ if (!state)
+ return;
+
drm_atomic_state_clear(state);
- DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
+ DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
kfree_state(state);
}
@@ -217,8 +223,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
state->crtcs[index] = crtc;
crtc_state->state = state;
- DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
- crtc->base.id, crtc_state, state);
+ DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
+ crtc->base.id, crtc_state, state);
return crtc_state;
}
@@ -248,11 +254,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_mode_config *config = &dev->mode_config;
/* FIXME: Mode prop is missing, which also controls ->enable. */
- if (property == config->prop_active) {
+ if (property == config->prop_active)
state->active = val;
- } else if (crtc->funcs->atomic_set_property)
+ else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
- return -EINVAL;
+ else
+ return -EINVAL;
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
@@ -266,9 +275,17 @@ int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property, uint64_t *val)
{
- if (crtc->funcs->atomic_get_property)
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_active)
+ *val = state->active;
+ else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
- return -EINVAL;
+ else
+ return -EINVAL;
+
+ return 0;
}
/**
@@ -293,8 +310,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
*/
if (state->active && !state->enable) {
- DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
+ crtc->base.id);
return -EINVAL;
}
@@ -340,8 +357,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
state->planes[index] = plane;
plane_state->state = state;
- DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
- plane->base.id, plane_state, state);
+ DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
+ plane->base.id, plane_state, state);
if (plane_state->crtc) {
struct drm_crtc_state *crtc_state;
@@ -450,6 +467,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
+ } else if (property == config->rotation_property) {
+ *val = state->rotation;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -473,14 +492,14 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
unsigned int fb_width, fb_height;
- unsigned int i;
+ int ret;
/* either *both* CRTC and FB must be set, or neither */
if (WARN_ON(state->crtc && !state->fb)) {
- DRM_DEBUG_KMS("CRTC set but no FB\n");
+ DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
return -EINVAL;
} else if (WARN_ON(state->fb && !state->crtc)) {
- DRM_DEBUG_KMS("FB set but no CRTC\n");
+ DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
return -EINVAL;
}
@@ -490,18 +509,16 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
- DRM_DEBUG_KMS("Invalid crtc for plane\n");
+ DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
- for (i = 0; i < plane->format_count; i++)
- if (state->fb->pixel_format == plane->format_types[i])
- break;
- if (i == plane->format_count) {
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(state->fb->pixel_format));
- return -EINVAL;
+ ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format));
+ return ret;
}
/* Give drivers some help against integer overflows */
@@ -509,9 +526,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- state->crtc_w, state->crtc_h,
- state->crtc_x, state->crtc_y);
+ DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ state->crtc_w, state->crtc_h,
+ state->crtc_x, state->crtc_y);
return -ERANGE;
}
@@ -523,12 +540,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
- DRM_DEBUG_KMS("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
- state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
- state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
- state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
- state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
+ DRM_DEBUG_ATOMIC("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
+ state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
+ state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
+ state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
return -ENOSPC;
}
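
The multiply-and-shift in the debug print above is an exact decimal expansion
of 16.16 fixed point: 10^6 / 2^16 reduces to 15625 / 1024, so
(frac * 15625) >> 10 yields the fractional part as six decimal digits without
floating point. A minimal standalone illustration (not from the patch):

#include <stdint.h>
#include <stdio.h>

static void print_fixed16(uint32_t v)
{
	/* 0xffff * 15625 fits comfortably in 32 bits, so no overflow */
	printf("%u.%06u\n", v >> 16, ((v & 0xffff) * 15625) >> 10);
}

int main(void)
{
	print_fixed16(0x00018000);	/* 1.5 -> prints "1.500000" */
	return 0;
}
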
@@ -575,7 +592,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
* at most the array is a bit too large.
*/
if (index >= state->num_connector) {
- DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
+ DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
return ERR_PTR(-EAGAIN);
}
@@ -590,8 +607,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
state->connectors[index] = connector;
connector_state->state = state;
- DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
- connector->base.id, connector_state, state);
+ DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
+ connector->base.id, connector_state, state);
if (connector_state->crtc) {
struct drm_crtc_state *crtc_state;
@@ -752,17 +769,18 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
}
if (crtc)
- DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
- plane_state, crtc->base.id);
+ DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
+ plane_state, crtc->base.id);
else
- DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
+ DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
+ plane_state);
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
/**
- * drm_atomic_set_fb_for_plane - set crtc for plane
+ * drm_atomic_set_fb_for_plane - set framebuffer for plane
* @plane_state: atomic state object for the plane
* @fb: fb to use for the plane
*
@@ -782,10 +800,11 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
plane_state->fb = fb;
if (fb)
- DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
- fb->base.id, plane_state);
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
+ fb->base.id, plane_state);
else
- DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
+ DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+ plane_state);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
@@ -818,11 +837,11 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
conn_state->crtc = crtc;
if (crtc)
- DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
- conn_state, crtc->base.id);
+ DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
+ conn_state, crtc->base.id);
else
- DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
- conn_state);
+ DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+ conn_state);
return 0;
}
@@ -858,8 +877,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
if (ret)
return ret;
- DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
- crtc->base.id, state);
+ DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
+ crtc->base.id, state);
/*
* Changed connectors are already in @state, so only need to look at the
@@ -890,19 +909,18 @@ int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- int i, num_connected_connectors = 0;
-
- for (i = 0; i < state->num_connector; i++) {
- struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
- conn_state = state->connector_states[i];
+ int i, num_connected_connectors = 0;
- if (conn_state && conn_state->crtc == crtc)
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->crtc == crtc)
num_connected_connectors++;
}
- DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
- state, num_connected_connectors, crtc->base.id);
+ DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
+ state, num_connected_connectors, crtc->base.id);
return num_connected_connectors;
}
@@ -914,7 +932,7 @@ EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
*
* This function should be used by legacy entry points which don't understand
* -EDEADLK semantics. For simplicity this one will grab all modeset locks after
- * the slowpath completed.
+ * the slowpath completed.
*/
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
@@ -949,36 +967,28 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
- int nplanes = config->num_total_plane;
- int ncrtcs = config->num_crtc;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
int i, ret = 0;
- DRM_DEBUG_KMS("checking %p\n", state);
-
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
+ DRM_DEBUG_ATOMIC("checking %p\n", state);
- if (!plane)
- continue;
-
- ret = drm_atomic_plane_check(plane, state->plane_states[i]);
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ ret = drm_atomic_plane_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n",
- plane->base.id);
+ DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
+ plane->base.id);
return ret;
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
-
- ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ ret = drm_atomic_crtc_check(crtc, crtc_state);
if (ret) {
- DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
+ crtc->base.id);
return ret;
}
}
@@ -987,17 +997,11 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
ret = config->funcs->atomic_check(state->dev, state);
if (!state->allow_modeset) {
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (crtc_state->mode_changed ||
crtc_state->active_changed) {
- DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
+ crtc->base.id);
return -EINVAL;
}
}
@@ -1032,7 +1036,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
if (ret)
return ret;
- DRM_DEBUG_KMS("commiting %p\n", state);
+ DRM_DEBUG_ATOMIC("commiting %p\n", state);
return config->funcs->atomic_commit(state->dev, state, false);
}
@@ -1063,7 +1067,7 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
if (ret)
return ret;
- DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
+ DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
return config->funcs->atomic_commit(state->dev, state, true);
}
@@ -1191,6 +1195,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
unsigned plane_mask = 0;
int ret = 0;
unsigned int i, j;
@@ -1294,15 +1300,9 @@ retry:
}
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- int ncrtcs = dev->mode_config.num_crtc;
-
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_pending_vblank_event *e;
- if (!crtc_state)
- continue;
-
e = create_vblank_event(dev, file_priv, arg->user_data);
if (!e) {
ret = -ENOMEM;
@@ -1354,14 +1354,7 @@ fail:
goto backoff;
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- int ncrtcs = dev->mode_config.num_crtc;
-
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
- if (!crtc_state)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
destroy_vblank_event(dev, file_priv, crtc_state->event);
crtc_state->event = NULL;
}
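
Most hunks in this file replace open-coded walks over state->crtcs,
state->planes and state->connectors with the new for_each_*_in_state()
iterators, which visit only the objects actually added to the atomic state,
so the old "if (!crtc) continue;" guards disappear. A hedged sketch of the
resulting style (the function name is hypothetical):

#include <drm/drm_atomic.h>

static int example_count_enabled_crtcs(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, enabled = 0;

	/* iterates only over CRTCs carried in this state */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		if (crtc_state->enable)
			enabled++;

	return enabled;
}
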
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7e3a52b97c7d..1d2ca52530d5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -116,9 +116,9 @@ steal_encoder(struct drm_atomic_state *state,
*/
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
- encoder->base.id, encoder->name,
- encoder_crtc->base.id);
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+ encoder->base.id, encoder->name,
+ encoder_crtc->base.id);
crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
if (IS_ERR(crtc_state))
@@ -130,9 +130,9 @@ steal_encoder(struct drm_atomic_state *state,
if (connector->state->best_encoder != encoder)
continue;
- DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
connector_state = drm_atomic_get_connector_state(state,
connector);
@@ -151,7 +151,7 @@ steal_encoder(struct drm_atomic_state *state,
static int
update_connector_routing(struct drm_atomic_state *state, int conn_idx)
{
- struct drm_connector_helper_funcs *funcs;
+ const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
struct drm_crtc *encoder_crtc;
struct drm_connector *connector;
@@ -165,9 +165,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
if (!connector)
return 0;
- DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
if (connector->state->crtc != connector_state->crtc) {
if (connector->state->crtc) {
@@ -186,7 +186,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
if (!connector_state->crtc) {
- DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
+ DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
@@ -199,19 +199,19 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
new_encoder = funcs->best_encoder(connector);
if (!new_encoder) {
- DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
return -EINVAL;
}
if (new_encoder == connector_state->best_encoder) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
- connector->base.id,
- connector->name,
- new_encoder->base.id,
- new_encoder->name,
- connector_state->crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
return 0;
}
@@ -222,9 +222,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
if (encoder_crtc) {
ret = steal_encoder(state, new_encoder, encoder_crtc);
if (ret) {
- DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
return ret;
}
}
@@ -235,12 +235,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
crtc_state = state->crtc_states[idx];
crtc_state->mode_changed = true;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
- connector->base.id,
- connector->name,
- new_encoder->base.id,
- new_encoder->name,
- connector_state->crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
return 0;
}
@@ -248,30 +248,24 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
static int
mode_fixup(struct drm_atomic_state *state)
{
- int ncrtcs = state->dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
bool ret;
- for (i = 0; i < ncrtcs; i++) {
- crtc_state = state->crtc_states[i];
-
- if (!crtc_state || !crtc_state->mode_changed)
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->mode_changed)
continue;
drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
}
- for (i = 0; i < state->num_connector; i++) {
- struct drm_encoder_helper_funcs *funcs;
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
- conn_state = state->connector_states[i];
-
- if (!conn_state)
- continue;
-
WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
if (!conn_state->crtc || !conn_state->best_encoder)
@@ -292,7 +286,7 @@ mode_fixup(struct drm_atomic_state *state)
encoder->bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_KMS("Bridge fixup failed\n");
+ DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
return -EINVAL;
}
}
@@ -301,37 +295,33 @@ mode_fixup(struct drm_atomic_state *state)
ret = funcs->atomic_check(encoder, crtc_state,
conn_state);
if (ret) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
+ encoder->base.id, encoder->name);
return ret;
}
} else {
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
+ encoder->base.id, encoder->name);
return -EINVAL;
}
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
- crtc_state = state->crtc_states[i];
- crtc = state->crtcs[i];
-
- if (!crtc_state || !crtc_state->mode_changed)
+ if (!crtc_state->mode_changed)
continue;
funcs = crtc->helper_private;
ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
+ crtc->base.id);
return -EINVAL;
}
}
@@ -346,7 +336,7 @@ needs_modeset(struct drm_crtc_state *state)
}
/**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
* @state: the driver state object
*
@@ -371,32 +361,27 @@ int
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int ncrtcs = dev->mode_config.num_crtc;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
int i, ret;
- for (i = 0; i < ncrtcs; i++) {
- crtc = state->crtcs[i];
- crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
- DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
+ crtc->base.id);
crtc_state->mode_changed = true;
}
if (crtc->state->enable != crtc_state->enable) {
- DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
+ crtc->base.id);
crtc_state->mode_changed = true;
}
}
- for (i = 0; i < state->num_connector; i++) {
+ for_each_connector_in_state(state, connector, connector_state, i) {
/*
* This only sets crtc->mode_changed for routing changes,
* drivers must set crtc->mode_changed themselves when connector
@@ -413,32 +398,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
* configuration. This must be done before calling mode_fixup in case a
* crtc only changed its mode but has the same set of connectors.
*/
- for (i = 0; i < ncrtcs; i++) {
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
int num_connectors;
- crtc = state->crtcs[i];
- crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
/*
* We must set ->active_changed after walking connectors for
* otherwise an update that only changes active would result in
* a full modeset because update_connector_routing force that.
*/
if (crtc->state->active != crtc_state->active) {
- DRM_DEBUG_KMS("[CRTC:%d] active changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
+ crtc->base.id);
crtc_state->active_changed = true;
}
if (!needs_modeset(crtc_state))
continue;
- DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
- crtc->base.id,
- crtc_state->enable ? 'y' : 'n',
+ DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
+ crtc->base.id,
+ crtc_state->enable ? 'y' : 'n',
crtc_state->active ? 'y' : 'n');
ret = drm_atomic_add_affected_connectors(state, crtc);
@@ -449,8 +428,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
crtc);
if (crtc_state->enable != !!num_connectors) {
- DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
+ crtc->base.id);
return -EINVAL;
}
@@ -461,7 +440,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_planes - validate state object for planes changes
* @dev: DRM device
* @state: the driver state object
*
@@ -476,17 +455,14 @@ int
drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i, ret = 0;
- for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs;
funcs = plane->helper_private;
@@ -497,18 +473,14 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n",
- plane->base.id);
+ DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
+ plane->base.id);
return ret;
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -517,8 +489,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
- DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
+ crtc->base.id);
return ret;
}
}
@@ -567,27 +539,26 @@ EXPORT_SYMBOL(drm_atomic_helper_check);
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int i;
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector_state *old_conn_state;
- struct drm_connector *connector;
- struct drm_encoder_helper_funcs *funcs;
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_crtc_state *old_crtc_state;
- old_conn_state = old_state->connector_states[i];
- connector = old_state->connectors[i];
-
/* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state. */
- if (!old_conn_state || !old_conn_state->crtc)
+ if (!old_conn_state->crtc)
continue;
old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
- if (!old_crtc_state->active)
+ if (!old_crtc_state->active ||
+ !needs_modeset(old_conn_state->crtc->state))
continue;
encoder = old_conn_state->best_encoder;
@@ -600,12 +571,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = encoder->helper_private;
- DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
- * it away), so we won't call call disable hooks twice.
+ * it away), so we won't call disable hooks twice.
*/
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
@@ -622,16 +593,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
encoder->bridge->funcs->post_disable(encoder->bridge);
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state;
-
- crtc = old_state->crtcs[i];
- old_crtc_state = old_state->crtc_states[i];
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
/* Shut down everything that needs a full modeset. */
- if (!crtc || !needs_modeset(crtc->state))
+ if (!needs_modeset(crtc->state))
continue;
if (!old_crtc_state->active)
@@ -639,8 +605,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
- DRM_DEBUG_KMS("disabling [CRTC:%d]\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+ crtc->base.id);
/* Right function depends upon target state. */
@@ -656,16 +622,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
static void
set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int i;
/* clear out existing links */
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
-
- connector = old_state->connectors[i];
-
- if (!connector || !connector->encoder)
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ if (!connector->encoder)
continue;
WARN_ON(!connector->encoder->crtc);
@@ -675,12 +640,8 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
}
/* set new links */
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
-
- connector = old_state->connectors[i];
-
- if (!connector || !connector->state->crtc)
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ if (!connector->state->crtc)
continue;
if (WARN_ON(!connector->state->best_encoder))
@@ -691,14 +652,7 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
}
/* set legacy state in the crtc structure */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc;
-
- crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
crtc->mode = crtc->state->mode;
crtc->enabled = crtc->state->enable;
crtc->x = crtc->primary->state->src_x >> 16;
@@ -709,38 +663,35 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
int i;
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
-
- crtc = old_state->crtcs[i];
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
- if (!crtc || !crtc->state->mode_changed)
+ if (!crtc->state->mode_changed)
continue;
funcs = crtc->helper_private;
- if (crtc->state->enable) {
- DRM_DEBUG_KMS("modeset on [CRTC:%d]\n",
- crtc->base.id);
+ if (crtc->state->enable && funcs->mode_set_nofb) {
+ DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
+ crtc->base.id);
funcs->mode_set_nofb(crtc);
}
}
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
- struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
- connector = old_state->connectors[i];
-
- if (!connector || !connector->state->best_encoder)
+ if (!connector->state->best_encoder)
continue;
encoder = connector->state->best_encoder;
@@ -752,14 +703,15 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!new_crtc_state->mode_changed)
continue;
- DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
- * it away), so we won't call call mode_set hooks twice.
+ * it away), so we won't call mode_set hooks twice.
*/
- funcs->mode_set(encoder, mode, adjusted_mode);
+ if (funcs->mode_set)
+ funcs->mode_set(encoder, mode, adjusted_mode);
if (encoder->bridge && encoder->bridge->funcs->mode_set)
encoder->bridge->funcs->mode_set(encoder->bridge,
@@ -768,46 +720,56 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
}
/**
- * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
+ * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
- * @state: atomic state
+ * @old_state: atomic state object with old state structures
*
- * This function commits the modeset changes that need to be committed before
- * updating planes. It shuts down all the outputs that need to be shut down and
+ * This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
+ *
+ * For compatibility with legacy crtc helpers this should be called before
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM, since plane updates then only happen when the CRTC is actually enabled.
*/
-void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
- struct drm_atomic_state *state)
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
- disable_outputs(dev, state);
- set_routing_links(dev, state);
- crtc_set_mode(dev, state);
+ disable_outputs(dev, old_state);
+ set_routing_links(dev, old_state);
+ crtc_set_mode(dev, old_state);
}
-EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
+EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
/**
- * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
- * This function commits the modeset changes that need to be committed after
- * updating planes: It enables all the outputs with the new configuration which
- * had to be turned off for the update.
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy crtc helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM, since plane updates then only happen when the CRTC is actually enabled.
*/
-void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
int i;
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
-
- crtc = old_state->crtcs[i];
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
- if (!crtc || !needs_modeset(crtc->state))
+ if (!needs_modeset(crtc->state))
continue;
if (!crtc->state->active)
@@ -816,8 +778,8 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
funcs = crtc->helper_private;
if (crtc->state->enable) {
- DRM_DEBUG_KMS("enabling [CRTC:%d]\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+ crtc->base.id);
if (funcs->enable)
funcs->enable(crtc);
@@ -826,28 +788,26 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
}
}
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
- struct drm_encoder_helper_funcs *funcs;
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
- connector = old_state->connectors[i];
-
- if (!connector || !connector->state->best_encoder)
+ if (!connector->state->best_encoder)
continue;
- if (!connector->state->crtc->state->active)
+ if (!connector->state->crtc->state->active ||
+ !needs_modeset(connector->state->crtc->state))
continue;
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
- DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
- * it away), so we won't call call enable hooks twice.
+ * it away), so we won't call enable hooks twice.
*/
if (encoder->bridge)
encoder->bridge->funcs->pre_enable(encoder->bridge);
@@ -861,18 +821,17 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
encoder->bridge->funcs->enable(encoder->bridge);
}
}
-EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
+EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
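
The kernel-doc above spells out the ordering contract: disables, then planes,
then enables for legacy-compatible behaviour, with drivers free to regroup.
A hedged sketch of the regrouped variant for runtime-PM hardware (the
function name is hypothetical; the helpers are the ones renamed in this
patch):

#include <drm/drm_atomic_helper.h>

static void example_commit_tail(struct drm_device *dev,
				struct drm_atomic_state *old_state)
{
	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	/* enable CRTCs first, so plane programming below happens while
	 * the hardware is guaranteed to be powered up */
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}
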
static void wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int nplanes = dev->mode_config.num_total_plane;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i;
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane || !plane->state->fence)
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ if (!plane->state->fence)
continue;
WARN_ON(!plane->state->fb);
@@ -889,16 +848,9 @@ static bool framebuffer_changed(struct drm_device *dev,
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
- int nplanes = old_state->dev->mode_config.num_total_plane;
int i;
- for (i = 0; i < nplanes; i++) {
- plane = old_state->planes[i];
- old_plane_state = old_state->plane_states[i];
-
- if (!plane)
- continue;
-
+ for_each_plane_in_state(old_state, plane, old_plane_state, i) {
if (plane->state->crtc != crtc &&
old_plane_state->crtc != crtc)
continue;
@@ -927,16 +879,9 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int ncrtcs = old_state->dev->mode_config.num_crtc;
int i, ret;
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
- old_crtc_state = old_state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
/* No one cares about the old state, so abuse it for tracking
* and store whether we hold a vblank reference (and should do a
* vblank wait) in the ->enable boolean. */
@@ -961,11 +906,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
}
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
- old_crtc_state = old_state->crtc_states[i];
-
- if (!crtc || !old_crtc_state->enable)
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ if (!old_crtc_state->enable)
continue;
ret = wait_event_timeout(dev->vblank[i].queue,
@@ -1014,7 +956,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
/*
* Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one conditions: It must be guaranteed
+ * any modeset locks at all under one condition: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
@@ -1030,11 +972,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
wait_for_fences(dev, state);
- drm_atomic_helper_commit_pre_planes(dev, state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state);
- drm_atomic_helper_commit_post_planes(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
@@ -1085,9 +1027,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
*/
/**
- * drm_atomic_helper_prepare_planes - prepare plane resources after commit
+ * drm_atomic_helper_prepare_planes - prepare plane resources before commit
* @dev: DRM device
- * @state: atomic state object with old state structures
+ * @state: atomic state object with new state structures
*
* This function prepares plane state, specifically framebuffers, for the new
* configuration. If any failure is encountered this function will call
@@ -1103,8 +1045,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
int ret, i;
for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
struct drm_framebuffer *fb;
if (!plane)
@@ -1112,10 +1055,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
funcs = plane->helper_private;
- fb = state->plane_states[i]->fb;
+ fb = plane_state->fb;
if (fb && funcs->prepare_fb) {
- ret = funcs->prepare_fb(plane, fb);
+ ret = funcs->prepare_fb(plane, fb, plane_state);
if (ret)
goto fail;
}
@@ -1125,8 +1068,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
fail:
for (i--; i >= 0; i--) {
- struct drm_plane_helper_funcs *funcs;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
struct drm_framebuffer *fb;
if (!plane)
@@ -1137,7 +1081,7 @@ fail:
fb = state->plane_states[i]->fb;
if (fb && funcs->cleanup_fb)
- funcs->cleanup_fb(plane, fb);
+ funcs->cleanup_fb(plane, fb, plane_state);
}
@@ -1161,16 +1105,14 @@ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
int i;
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -1180,13 +1122,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_begin(crtc);
}
- for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = old_state->planes[i];
- struct drm_plane_state *old_plane_state;
-
- if (!plane)
- continue;
+ for_each_plane_in_state(old_state, plane, old_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs;
funcs = plane->helper_private;
@@ -1205,12 +1142,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_update(plane, old_plane_state);
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -1237,23 +1170,20 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- int nplanes = dev->mode_config.num_total_plane;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i;
- for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = old_state->planes[i];
+ for_each_plane_in_state(old_state, plane, plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs;
struct drm_framebuffer *old_fb;
- if (!plane)
- continue;
-
funcs = plane->helper_private;
- old_fb = old_state->plane_states[i]->fb;
+ old_fb = plane_state->fb;
if (old_fb && funcs->cleanup_fb)
- funcs->cleanup_fb(plane, old_fb);
+ funcs->cleanup_fb(plane, old_fb, plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
@@ -1496,8 +1426,10 @@ static int update_output_state(struct drm_atomic_state *state,
struct drm_mode_set *set)
{
struct drm_device *dev = set->crtc->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
struct drm_connector_state *conn_state;
- int ncrtcs = state->dev->mode_config.num_crtc;
int ret, i, j;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
@@ -1513,27 +1445,14 @@ static int update_output_state(struct drm_atomic_state *state,
return PTR_ERR(conn_state);
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ret;
}
/* Then recompute connector->crtc links and crtc enabling state. */
- for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector;
-
- connector = state->connectors[i];
- conn_state = state->connector_states[i];
-
- if (!connector)
- continue;
-
+ for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == set->crtc) {
ret = drm_atomic_set_crtc_for_connector(conn_state,
NULL);
@@ -1552,13 +1471,7 @@ static int update_output_state(struct drm_atomic_state *state,
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
/* Don't update ->enable for the CRTC in the set_config request,
* since a mismatch would indicate a bug in the upper layers.
* The actual modeset code later on will catch any
@@ -1678,12 +1591,13 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_set_config);
/**
- * drm_atomic_helper_crtc_set_property - helper for crtc prorties
+ * drm_atomic_helper_crtc_set_property - helper for crtc properties
* @crtc: DRM crtc
* @property: DRM property
* @val: value of property
*
- * Provides a default plane disablle handler using the atomic driver interface.
+ * Provides a default crtc set_property handler using the atomic driver
+ * interface.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1737,12 +1651,13 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
/**
- * drm_atomic_helper_plane_set_property - helper for plane prorties
+ * drm_atomic_helper_plane_set_property - helper for plane properties
* @plane: DRM plane
* @property: DRM property
* @val: value of property
*
- * Provides a default plane disable handler using the atomic driver interface.
+ * Provides a default plane set_property handler using the atomic driver
+ * interface.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1796,12 +1711,13 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
/**
- * drm_atomic_helper_connector_set_property - helper for connector prorties
+ * drm_atomic_helper_connector_set_property - helper for connector properties
* @connector: DRM connector
* @property: DRM property
* @val: value of property
*
- * Provides a default plane disablle handler using the atomic driver interface.
+ * Provides a default connector set_property handler using the atomic driver
+ * interface.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1984,10 +1900,10 @@ retry:
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
list_for_each_entry(tmp_connector, &config->connector_list, head) {
- if (connector->state->crtc != crtc)
+ if (tmp_connector->state->crtc != crtc)
continue;
- if (connector->dpms == DRM_MODE_DPMS_ON) {
+ if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
active = true;
break;
}
@@ -2050,6 +1966,26 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ memcpy(state, crtc->state, sizeof(*state));
+
+ state->mode_changed = false;
+ state->active_changed = false;
+ state->planes_changed = false;
+ state->event = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
+/**
* drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
* @crtc: drm CRTC
*
@@ -2064,20 +2000,35 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
if (WARN_ON(!crtc->state))
return NULL;
- state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
-
- if (state) {
- state->mode_changed = false;
- state->active_changed = false;
- state->planes_changed = false;
- state->event = NULL;
- }
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_crtc_duplicate_state(crtc, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @crtc: CRTC object
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ /*
+ * This is currently a placeholder so that drivers that subclass the
+ * state will automatically do the right thing if code is ever added
+ * to this function.
+ */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
+/**
* drm_atomic_helper_crtc_destroy_state - default state destroy hook
* @crtc: drm CRTC
* @state: CRTC state object to release
@@ -2088,6 +2039,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ __drm_atomic_helper_crtc_destroy_state(crtc, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
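
The new __drm_atomic_helper_crtc_* pair exists so drivers can embed
drm_crtc_state in a larger structure and still reuse the core copy/release
logic. A hedged sketch of that subclassing pattern (all "example" names and
the extra field are hypothetical):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>

struct example_crtc_state {
	struct drm_crtc_state base;
	u32 dither_mode;	/* hypothetical driver-private field */
};

static inline struct example_crtc_state *
to_example_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct example_crtc_state, base);
}

static struct drm_crtc_state *
example_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct example_crtc_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* core helper copies the base state and resets mode_changed etc. */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	/* driver-private fields are copied by hand */
	state->dither_mode = to_example_crtc_state(crtc->state)->dither_mode;

	return &state->base;
}
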
@@ -2113,6 +2065,24 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ memcpy(state, plane->state, sizeof(*state));
+
+ if (state->fb)
+ drm_framebuffer_reference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
+/**
* drm_atomic_helper_plane_duplicate_state - default state duplicate hook
* @plane: drm plane
*
@@ -2127,16 +2097,32 @@ drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
if (WARN_ON(!plane->state))
return NULL;
- state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
-
- if (state && state->fb)
- drm_framebuffer_reference(state->fb);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @plane: plane object
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
+/**
* drm_atomic_helper_plane_destroy_state - default state destroy hook
* @plane: drm plane
* @state: plane state object to release
@@ -2147,9 +2133,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- if (state->fb)
- drm_framebuffer_unreference(state->fb);
-
+ __drm_atomic_helper_plane_destroy_state(plane, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
@@ -2173,6 +2157,22 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ memcpy(state, connector->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
* drm_atomic_helper_connector_duplicate_state - default state duplicate hook
* @connector: drm connector
*
@@ -2182,14 +2182,41 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
{
+ struct drm_connector_state *state;
+
if (WARN_ON(!connector->state))
return NULL;
- return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_connector_duplicate_state(connector, state);
+
+ return state;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @connector: connector object
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ /*
+ * This is currently a placeholder so that drivers that subclass the
+ * state will automatically do the right thing if code is ever added
+ * to this function.
+ */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
* drm_atomic_helper_connector_destroy_state - default state destroy hook
* @connector: drm connector
* @state: connector state object to release
@@ -2200,6 +2227,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
+ __drm_atomic_helper_connector_destroy_state(connector, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index d1187e571c6d..eaa5790c2a6f 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -49,7 +49,7 @@ void drm_bridge_remove(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_bridge_remove);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
{
if (!dev || !bridge)
return -EINVAL;
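
Dropping the stray `extern` turns this into the ordinary definition of the
second half of the bridge handshake: the bridge driver registers itself with
drm_bridge_add() (as ps8622 does above), and the display driver that owns the
encoder binds it with drm_bridge_attach(). A hedged sketch of the consuming
side (the function name is hypothetical):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int example_encoder_bind(struct drm_device *drm,
				struct drm_bridge *bridge)
{
	int ret;

	ret = drm_bridge_attach(drm, bridge);
	if (ret)
		DRM_ERROR("failed to attach bridge: %d\n", ret);

	return ret;
}
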
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b6f076b213bc..3007b44e6bf4 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -660,6 +660,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_mode_config *config = &dev->mode_config;
int ret;
+ WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
+ WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
+
crtc->dev = dev;
crtc->funcs = funcs;
crtc->invert_dimensions = false;
@@ -1999,21 +2002,32 @@ int drm_mode_getcrtc(struct drm_device *dev,
return -ENOENT;
drm_modeset_lock_crtc(crtc, crtc->primary);
- crtc_resp->x = crtc->x;
- crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
if (crtc->primary->fb)
crtc_resp->fb_id = crtc->primary->fb->base.id;
else
crtc_resp->fb_id = 0;
- if (crtc->enabled) {
-
- drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
- crtc_resp->mode_valid = 1;
+ if (crtc->state) {
+ crtc_resp->x = crtc->primary->state->src_x >> 16;
+ crtc_resp->y = crtc->primary->state->src_y >> 16;
+ if (crtc->state->enable) {
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
+ crtc_resp->mode_valid = 1;
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
} else {
- crtc_resp->mode_valid = 0;
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ if (crtc->enabled) {
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+ crtc_resp->mode_valid = 1;
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
}
drm_modeset_unlock_crtc(crtc);
@@ -2266,8 +2280,6 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
crtc = drm_encoder_get_crtc(encoder);
if (crtc)
enc_resp->crtc_id = crtc->base.id;
- else if (encoder->crtc)
- enc_resp->crtc_id = encoder->crtc->base.id;
else
enc_resp->crtc_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -2402,6 +2414,27 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
return 0;
}
+/**
+ * drm_plane_check_pixel_format - Check if the plane supports the pixel format
+ * @plane: plane to check for format support
+ * @format: the pixel format
+ *
+ * Returns:
+ * Zero if @plane has @format in its list of supported pixel formats, -EINVAL
+ * otherwise.
+ */
+int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+{
+ unsigned int i;
+
+ for (i = 0; i < plane->format_count; i++) {
+ if (format == plane->format_types[i])
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
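Besides the two core call sites converted below, external code can use the new helper to validate a framebuffer before handing it to a plane; a short hypothetical sketch:

/* Reject an fb the plane cannot scan out (foo_* is hypothetical). */
static int foo_validate_fb(struct drm_plane *plane,
			   struct drm_framebuffer *fb)
{
	int ret = drm_plane_check_pixel_format(plane, fb->pixel_format);

	if (ret)
		DRM_DEBUG_KMS("plane cannot scan out format %s\n",
			      drm_get_format_name(fb->pixel_format));
	return ret;
}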
/*
* setplane_internal - setplane handler for internal callers
*
@@ -2422,7 +2455,6 @@ static int __setplane_internal(struct drm_plane *plane,
{
int ret = 0;
unsigned int fb_width, fb_height;
- unsigned int i;
/* No fb means shut it down */
if (!fb) {
@@ -2445,16 +2477,24 @@ static int __setplane_internal(struct drm_plane *plane,
}
/* Check whether this plane supports the fb pixel format. */
- for (i = 0; i < plane->format_count; i++)
- if (fb->pixel_format == plane->format_types[i])
- break;
- if (i == plane->format_count) {
+ ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+ if (ret) {
DRM_DEBUG_KMS("Invalid pixel format %s\n",
drm_get_format_name(fb->pixel_format));
- ret = -EINVAL;
goto out;
}
+ /* Give drivers some help against integer overflows */
+ if (crtc_w > INT_MAX ||
+ crtc_x > INT_MAX - (int32_t) crtc_w ||
+ crtc_h > INT_MAX ||
+ crtc_y > INT_MAX - (int32_t) crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ crtc_w, crtc_h, crtc_x, crtc_y);
+ return -ERANGE;
+ }
+
fb_width = fb->width << 16;
fb_height = fb->height << 16;
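The check, relocated from drm_mode_setplane() below so that internal (cursor) plane updates are covered too, is written to avoid signed overflow in the comparison itself; a worked example with hypothetical values:

/*
 * Suppose crtc_x == INT_MAX - 10 and crtc_w == 100.  Evaluating
 * crtc_x + (int32_t)crtc_w would overflow a signed int, which is
 * undefined behaviour.  The test used above,
 *
 *	crtc_x > INT_MAX - (int32_t)crtc_w,
 *
 * only subtracts crtc_w from INT_MAX, which cannot overflow because
 * crtc_w has already been checked against INT_MAX on the line before.
 */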
@@ -2539,17 +2579,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- /* Give drivers some help against integer overflows */
- if (plane_req->crtc_w > INT_MAX ||
- plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
- plane_req->crtc_h > INT_MAX ||
- plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->crtc_x, plane_req->crtc_y);
- return -ERANGE;
- }
-
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
@@ -2775,6 +2804,23 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ /*
+ * Check whether the primary plane supports the fb pixel format.
+ * Drivers not implementing the universal planes API use a
+ * default formats list provided by the DRM core which doesn't
+ * match real hardware capabilities. Skip the check in that
+ * case.
+ */
+ if (!crtc->primary->format_default) {
+ ret = drm_plane_check_pixel_format(crtc->primary,
+ fb->pixel_format);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format));
+ goto out;
+ }
+ }
+
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
mode, fb);
if (ret)
@@ -3252,6 +3298,12 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
+
+ if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+ DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
+ return -EINVAL;
+ }
}
return 0;
@@ -3266,7 +3318,7 @@ internal_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret;
- if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
@@ -3282,6 +3334,12 @@ internal_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
+ if (r->flags & DRM_MODE_FB_MODIFIERS &&
+ !dev->mode_config.allow_fb_modifiers) {
+ DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+ return ERR_PTR(-EINVAL);
+ }
+
ret = framebuffer_check(r);
if (ret)
return ERR_PTR(ret);
@@ -5543,6 +5601,7 @@ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
return NULL;
}
+EXPORT_SYMBOL(drm_mode_get_tile_group);
/**
* drm_mode_create_tile_group - create a tile group from a displayid description
@@ -5581,3 +5640,4 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index b1979e7bdc88..ab00286aec93 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -161,7 +161,7 @@ EXPORT_SYMBOL(drm_helper_crtc_in_use);
static void
drm_encoder_disable(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
@@ -191,7 +191,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
if (crtc_funcs->disable)
@@ -229,7 +229,7 @@ EXPORT_SYMBOL(drm_helper_disable_unused_functions);
static void
drm_crtc_prepare_encoders(struct drm_device *dev)
{
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -270,9 +270,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
bool saved_enabled;
struct drm_encoder *encoder;
@@ -292,6 +292,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
saved_mode = crtc->mode;
+ saved_hwmode = crtc->hwmode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -334,6 +335,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ crtc->hwmode = *adjusted_mode;
+
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -396,9 +399,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
encoder->bridge->funcs->enable(encoder->bridge);
}
- /* Store real post-adjustment hardware mode. */
- crtc->hwmode = *adjusted_mode;
-
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
@@ -411,6 +411,7 @@ done:
if (!ret) {
crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
+ crtc->hwmode = saved_hwmode;
crtc->x = saved_x;
crtc->y = saved_y;
}
@@ -472,7 +473,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
- struct drm_crtc_helper_funcs *crtc_funcs;
+ const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
int ret;
int i;
@@ -572,7 +573,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* a) traverse passed in connector list and get encoders for them */
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_connector_helper_funcs *connector_funcs =
+ const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
@@ -732,7 +733,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_bridge *bridge = encoder->bridge;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
if (bridge) {
if (mode == DRM_MODE_DPMS_ON)
@@ -794,7 +795,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
@@ -808,7 +809,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
@@ -837,6 +838,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
+ fb->modifier[i] = mode_cmd->modifier[i];
}
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
@@ -869,7 +871,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
- struct drm_crtc_helper_funcs *crtc_funcs;
+ const struct drm_crtc_helper_funcs *crtc_funcs;
int encoder_dpms;
bool ret;
@@ -934,7 +936,7 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
struct drm_framebuffer *old_fb)
{
struct drm_crtc_state *crtc_state;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int ret;
if (crtc->funcs->atomic_duplicate_state)
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index f1283878ff6d..71dcbc64ae98 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -427,11 +427,13 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
* retrying the transaction as appropriate. It is assumed that the
* aux->transfer function does not modify anything in the msg other than the
* reply field.
+ *
+ * Returns bytes transferred on success, or a negative error code on failure.
*/
static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
unsigned int retry;
- int err;
+ int ret;
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
@@ -440,14 +442,14 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
*/
for (retry = 0; retry < 7; retry++) {
mutex_lock(&aux->hw_mutex);
- err = aux->transfer(aux, msg);
+ ret = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
- if (err < 0) {
- if (err == -EBUSY)
+ if (ret < 0) {
+ if (ret == -EBUSY)
continue;
- DRM_DEBUG_KMS("transaction failed: %d\n", err);
- return err;
+ DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+ return ret;
}
@@ -460,7 +462,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
break;
case DP_AUX_NATIVE_REPLY_NACK:
- DRM_DEBUG_KMS("native nack\n");
+ DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size);
return -EREMOTEIO;
case DP_AUX_NATIVE_REPLY_DEFER:
@@ -488,12 +490,10 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
* Both native ACK and I2C ACK replies received. We
* can assume the transfer was successful.
*/
- if (err < msg->size)
- return -EPROTO;
- return 0;
+ return ret;
case DP_AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("I2C nack\n");
+ DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu\n", ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
@@ -513,14 +513,55 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return -EREMOTEIO;
}
+/*
+ * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
+ *
+ * Returns an error code on failure, or a recommended transfer size on success.
+ */
+static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg)
+{
+ int err, ret = orig_msg->size;
+ struct drm_dp_aux_msg msg = *orig_msg;
+
+ while (msg.size > 0) {
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ if (err <= 0)
+ return err == 0 ? -EPROTO : err;
+
+ if (err < msg.size && err < ret) {
+ DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n",
+ msg.size, err);
+ ret = err;
+ }
+
+ msg.size -= err;
+ msg.buffer += err;
+ }
+
+ return ret;
+}
+
+/*
+ * Bizlink designed DP->DVI-D Dual Link adapters require the I2C over AUX
+ * packets to be as large as possible. If not, the I2C transactions never
+ * succeed. Hence the default is the maximum.
+ */
+static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES;
+module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644);
+MODULE_PARM_DESC(dp_aux_i2c_transfer_size,
+ "Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)");
+
static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
int num)
{
struct drm_dp_aux *aux = adapter->algo_data;
unsigned int i, j;
+ unsigned transfer_size;
struct drm_dp_aux_msg msg;
int err = 0;
+ dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
+
memset(&msg, 0, sizeof(msg));
for (i = 0; i < num; i++) {
@@ -538,20 +579,19 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
err = drm_dp_i2c_do_msg(aux, &msg);
if (err < 0)
break;
- /*
- * Many hardware implementations support FIFOs larger than a
- * single byte, but it has been empirically determined that
- * transferring data in larger chunks can actually lead to
- * decreased performance. Therefore each message is simply
- * transferred byte-by-byte.
+ /* We want each transaction to be as large as possible, but
+ * we'll go to smaller sizes if the hardware gives us a
+ * short reply.
*/
- for (j = 0; j < msgs[i].len; j++) {
+ transfer_size = dp_aux_i2c_transfer_size;
+ for (j = 0; j < msgs[i].len; j += msg.size) {
msg.buffer = msgs[i].buf + j;
- msg.size = 1;
+ msg.size = min(transfer_size, msgs[i].len - j);
- err = drm_dp_i2c_do_msg(aux, &msg);
+ err = drm_dp_i2c_drain_msg(aux, &msg);
if (err < 0)
break;
+ transfer_size = err;
}
if (err < 0)
break;
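Traced with hypothetical numbers: for a 24-byte read with the default 16-byte transfer size, the first drm_dp_i2c_drain_msg() call requests 16 bytes. If the sink only returns 4 bytes per reply, drain_msg() keeps iterating until all 16 have arrived and returns 4 as the recommended size, so the outer loop fetches the remaining 8 bytes in 4-byte messages rather than repeatedly over-asking.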
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 379ab4555756..132581ca4ad8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2324,6 +2324,19 @@ out:
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ int slots = 0;
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return slots;
+
+ slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
+ return slots;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
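The new query pairs with drm_dp_mst_allocate_vcpi(); a hypothetical driver snippet showing the intended use when programming the stream:

	/* foo_program_stream() is hypothetical driver code. */
	slots = drm_dp_mst_get_vcpi_slots(mgr, port);
	if (slots == 0)
		return;	/* port went away, or no VCPI allocated */
	foo_program_stream(crtc, slots);

The helper validates the port reference internally, so a port that has been unplugged simply yields zero slots instead of a use-after-free.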
+
/**
* drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
* @mgr: manager for this port
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index d51213464672..48f7359e2a6b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -70,7 +70,7 @@ void drm_err(const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
+ printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
__builtin_return_address(0), &vaf);
va_end(args);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index cc0ae047ed3b..5c1aca443e54 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -304,7 +304,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
}
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1e6a0c760c5d..cac422916c7a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -238,7 +238,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
- struct drm_crtc_helper_funcs *funcs;
+ const struct drm_crtc_helper_funcs *funcs;
int i;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
@@ -285,7 +285,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_crtc *crtc;
- struct drm_crtc_helper_funcs *funcs;
+ const struct drm_crtc_helper_funcs *funcs;
struct drm_framebuffer *fb;
int i;
@@ -765,7 +765,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
- struct drm_crtc_helper_funcs *crtc_funcs;
+ const struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, j, rc = 0;
@@ -1034,23 +1034,45 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
- int x, y;
+ struct drm_mode_set *mode_set;
+ int x, y, j;
+ /* In case of a tile group, are we the last tile vertically or
+ * horizontally? If there is no tile group, we are always the last
+ * one both vertically and horizontally.
+ */
+ bool lastv = true, lasth = true;
+
desired_mode = fb_helper->crtc_info[i].desired_mode;
+ mode_set = &fb_helper->crtc_info[i].mode_set;
+
+ if (!desired_mode)
+ continue;
+
+ crtc_count++;
+
x = fb_helper->crtc_info[i].x;
y = fb_helper->crtc_info[i].y;
- if (desired_mode) {
- if (gamma_size == 0)
- gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
- if (desired_mode->hdisplay + x < sizes.fb_width)
- sizes.fb_width = desired_mode->hdisplay + x;
- if (desired_mode->vdisplay + y < sizes.fb_height)
- sizes.fb_height = desired_mode->vdisplay + y;
- if (desired_mode->hdisplay + x > sizes.surface_width)
- sizes.surface_width = desired_mode->hdisplay + x;
- if (desired_mode->vdisplay + y > sizes.surface_height)
- sizes.surface_height = desired_mode->vdisplay + y;
- crtc_count++;
+
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+
+ sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
+ sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
+
+ for (j = 0; j < mode_set->num_connectors; j++) {
+ struct drm_connector *connector = mode_set->connectors[j];
+ if (connector->has_tile) {
+ lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
+ lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
+ /* cloning to multiple tiles is just crazy-talk, so: */
+ break;
+ }
}
+
+ if (lasth)
+ sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+ if (lastv)
+ sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
}
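As a hypothetical example: with a 2x1 tile group, both tiles grow surface_width/surface_height, but only the right-hand tile (tile_h_loc == num_h_tile - 1) may clamp fb_width, so the fbdev framebuffer spans the whole tiled area instead of being truncated to the first tile's mode.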
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
@@ -1261,12 +1283,12 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
int width, int height)
{
struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *mode;
bool prefer_non_interlace;
cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->specified == false)
- return mode;
+ return NULL;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
@@ -1275,7 +1297,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
goto create_mode;
prefer_non_interlace = !cmdline_mode->interlace;
- again:
+again:
list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
@@ -1529,7 +1551,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int c, o;
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
+ const struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index f1b32f91d941..cbb4fc0fc969 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -37,6 +37,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include "drm_internal.h"
#include "drm_legacy.h"
/**
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 2f4c4343dfa3..aa8bbb460c57 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
-drm_ioctl_compat_t *drm_compat_ioctls[] = {
+static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 3785d66721f2..266dcd6cdf3b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -321,6 +321,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
else
req->value = 64;
break;
+ case DRM_CAP_ADDFB2_MODIFIERS:
+ req->value = dev->mode_config.allow_fb_modifiers;
+ break;
default:
return -EINVAL;
}
@@ -521,8 +524,13 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = { \
+ .cmd = ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -660,39 +668,29 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
- unsigned int usize, asize;
+ unsigned int usize, asize, drv_size;
dev = file_priv->minor->dev;
if (drm_device_is_unplugged(dev))
return -ENODEV;
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
+ if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) {
+ /* driver ioctl */
+ if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ goto err_i1;
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- cmd = ioctl->cmd_drv;
- }
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- u32 drv_size;
-
+ } else {
+ /* core ioctl */
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ goto err_i1;
ioctl = &drm_ioctls[nr];
+ }
- drv_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
-
- cmd = ioctl->cmd;
- } else
- goto err_i1;
+ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = _IOC_SIZE(cmd);
+ asize = max(usize, drv_size);
+ cmd = ioctl->cmd;
DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
task_pid_nr(current),
@@ -773,12 +771,13 @@ EXPORT_SYMBOL(drm_ioctl);
*/
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
- if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
- (nr < DRM_COMMAND_BASE)) {
- *flags = drm_ioctls[nr].flags;
- return true;
- }
+ if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END)
+ return false;
+
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ return false;
- return false;
+ *flags = drm_ioctls[nr].flags;
+ return true;
}
EXPORT_SYMBOL(drm_ioctl_flags);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 10574a0c3a55..c8a34476570a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -276,7 +276,6 @@ static void vblank_disable_fn(unsigned long arg)
void drm_vblank_cleanup(struct drm_device *dev)
{
int crtc;
- unsigned long irqflags;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
@@ -285,11 +284,10 @@ void drm_vblank_cleanup(struct drm_device *dev)
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
- del_timer_sync(&vblank->disable_timer);
+ WARN_ON(vblank->enabled &&
+ drm_core_check_feature(dev, DRIVER_MODESET));
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- vblank_disable_and_save(dev, crtc);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ del_timer_sync(&vblank->disable_timer);
}
kfree(dev->vblank);
@@ -475,17 +473,23 @@ int drm_irq_uninstall(struct drm_device *dev)
dev->irq_enabled = false;
/*
- * Wake up any waiters so they don't hang.
+ * Wake up any waiters so they don't hang. This is just to paper over
+ * issues for UMS drivers which aren't in full control of their
+ * vblank/irq handling. KMS drivers must ensure that vblanks are all
+ * disabled when uninstalling the irq handler.
*/
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
+ if (!vblank->enabled)
+ continue;
+
+ WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
+
+ vblank_disable_and_save(dev, i);
wake_up(&vblank->queue);
- vblank->enabled = false;
- vblank->last =
- dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1052,7 +1056,7 @@ EXPORT_SYMBOL(drm_vblank_get);
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
- * This is the native kms version of drm_vblank_off().
+ * This is the native kms version of drm_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
@@ -1233,6 +1237,38 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
+ * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
+ * @drm_crtc: CRTC in question
+ *
+ * Drivers can use this function to reset the vblank state to off at load time.
+ * Drivers should use this together with the drm_crtc_vblank_off() and
+ * drm_crtc_vblank_on() functions. The difference compared to
+ * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
+ * and hence doesn't need to call any driver hooks.
+ */
+void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
+{
+ struct drm_device *dev = drm_crtc->dev;
+ unsigned long irqflags;
+ int crtc = drm_crtc_index(drm_crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /*
+ * Prevent subsequent drm_vblank_get() from enabling the vblank
+ * interrupt by bumping the refcount.
+ */
+ if (!vblank->inmodeset) {
+ atomic_inc(&vblank->refcount);
+ vblank->inmodeset = 1;
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ WARN_ON(!list_empty(&dev->vblank_event_list));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_reset);
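A sketch of where the reset helper slots into a driver, with foo_* hypothetical:

static int foo_crtc_load(struct drm_device *dev, struct drm_crtc *crtc)
{
	/* ... drm_crtc_init_with_planes(), hardware bring-up ... */

	/* hardware starts with vblank irqs disabled; tell the core */
	drm_crtc_vblank_reset(crtc);

	return 0;
}

Afterwards the driver calls drm_crtc_vblank_on() from its CRTC enable hook and drm_crtc_vblank_off() from the disable hook, so the core's refcount and the hardware state stay in sync.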
+
+/**
* drm_vblank_on - enable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
@@ -1653,7 +1689,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
struct timeval tvblank;
unsigned long irqflags;
- if (!dev->num_crtcs)
+ if (WARN_ON_ONCE(!dev->num_crtcs))
return false;
if (WARN_ON(crtc >= dev->num_crtcs))
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 487d0e35c134..213b11ea69b5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -278,7 +278,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
hblank = drm_mode->hdisplay * hblank_percentage /
(100 * HV_FACTOR - hblank_percentage);
hblank -= hblank % (2 * CVT_H_GRANULARITY);
- /* 14. find the total pixes per line */
+ /* 14. find the total pixels per line */
drm_mode->htotal = drm_mode->hdisplay + hblank;
drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
drm_mode->hsync_start = drm_mode->hsync_end -
@@ -903,6 +903,12 @@ EXPORT_SYMBOL(drm_mode_duplicate);
*/
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
+ if (!mode1 && !mode2)
+ return true;
+
+ if (!mode1 || !mode2)
+ return false;
+
/* do clock check convert to PICOS so fb modes get matched
* the same */
if (mode1->clock && mode2->clock) {
@@ -1148,7 +1154,7 @@ EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
- * @merge_type_bits: whether to merge or overright type bits.
+ * @merge_type_bits: whether to merge or overwrite type bits
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
@@ -1209,7 +1215,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update);
* <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
* The intermediate drm_cmdline_mode structure is required to store additional
- * options from the command line modline like the force-enabel/disable flag.
+ * options from the command line modeline, like the force-enable/disable flag.
*
* Returns:
* True if a valid modeline has been parsed, false otherwise.
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 16150a00c237..aaa130736bf8 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -43,14 +43,10 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
- struct device_node *remote_port, *ep = NULL;
+ struct device_node *remote_port, *ep;
uint32_t possible_crtcs = 0;
- do {
- ep = of_graph_get_next_endpoint(port, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(port, ep) {
remote_port = of_graph_get_remote_port(ep);
if (!remote_port) {
of_node_put(ep);
@@ -60,7 +56,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
of_node_put(remote_port);
- } while (1);
+ }
return possible_crtcs;
}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index fd29f03645b8..1b1bd42b0368 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -27,6 +27,7 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_internal.h"
#include "drm_legacy.h"
/**
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5ba5792bfdba..40c1db9ad7c3 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -344,20 +344,7 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
};
EXPORT_SYMBOL(drm_primary_helper_funcs);
-/**
- * drm_primary_helper_create_plane() - Create a generic primary plane
- * @dev: drm device
- * @formats: pixel formats supported, or NULL for a default safe list
- * @num_formats: size of @formats; ignored if @formats is NULL
- *
- * Allocates and initializes a primary plane that can be used with the primary
- * plane helpers. Drivers that wish to use driver-specific plane structures or
- * provide custom handler functions may perform their own allocation and
- * initialization rather than calling this function.
- */
-struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
- const uint32_t *formats,
- int num_formats)
+static struct drm_plane *create_primary_plane(struct drm_device *dev)
{
struct drm_plane *primary;
int ret;
@@ -368,15 +355,17 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
return NULL;
}
- if (formats == NULL) {
- formats = safe_modeset_formats;
- num_formats = ARRAY_SIZE(safe_modeset_formats);
- }
+ /*
+ * Remove the format_default field from drm_plane when dropping
+ * this helper.
+ */
+ primary->format_default = true;
/* possible_crtc's will be filled in later by crtc_init */
ret = drm_universal_plane_init(dev, primary, 0,
&drm_primary_helper_funcs,
- formats, num_formats,
+ safe_modeset_formats,
+ ARRAY_SIZE(safe_modeset_formats),
DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
@@ -385,7 +374,6 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
return primary;
}
-EXPORT_SYMBOL(drm_primary_helper_create_plane);
/**
* drm_crtc_init - Legacy CRTC initialization function
@@ -404,7 +392,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
{
struct drm_plane *primary;
- primary = drm_primary_helper_create_plane(dev, NULL, 0);
+ primary = create_primary_plane(dev);
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}
EXPORT_SYMBOL(drm_crtc_init);
@@ -413,9 +401,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
struct drm_plane_state *plane_state,
struct drm_framebuffer *old_fb)
{
- struct drm_plane_helper_funcs *plane_funcs;
+ const struct drm_plane_helper_funcs *plane_funcs;
struct drm_crtc *crtc[2];
- struct drm_crtc_helper_funcs *crtc_funcs[2];
+ const struct drm_crtc_helper_funcs *crtc_funcs[2];
int i, ret = 0;
plane_funcs = plane->helper_private;
@@ -437,7 +425,8 @@ int drm_plane_helper_commit(struct drm_plane *plane,
if (plane_funcs->prepare_fb && plane_state->fb &&
plane_state->fb != old_fb) {
- ret = plane_funcs->prepare_fb(plane, plane_state->fb);
+ ret = plane_funcs->prepare_fb(plane, plane_state->fb,
+ plane_state);
if (ret)
goto out;
}
@@ -487,7 +476,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
}
if (plane_funcs->cleanup_fb && old_fb)
- plane_funcs->cleanup_fb(plane, old_fb);
+ plane_funcs->cleanup_fb(plane, old_fb, plane_state);
out:
if (plane_state) {
if (plane->funcs->atomic_destroy_state)
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 3fee587bc284..63503879a676 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -98,7 +98,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- struct drm_connector_helper_funcs *connector_funcs =
+ const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
int mode_flags = 0;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 5c99d3773212..ffc305fc2076 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -166,23 +166,68 @@ void drm_sysfs_destroy(void)
/*
* Connector properties
*/
-static ssize_t status_show(struct device *device,
+static ssize_t status_store(struct device *device,
struct device_attribute *attr,
- char *buf)
+ const char *buf, size_t count)
{
struct drm_connector *connector = to_drm_connector(device);
- enum drm_connector_status status;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
int ret;
- ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
return ret;
- status = connector->funcs->detect(connector, true);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ old_status = connector->status;
+
+ if (sysfs_streq(buf, "detect")) {
+ connector->force = 0;
+ connector->status = connector->funcs->detect(connector, true);
+ } else if (sysfs_streq(buf, "on")) {
+ connector->force = DRM_FORCE_ON;
+ } else if (sysfs_streq(buf, "on-digital")) {
+ connector->force = DRM_FORCE_ON_DIGITAL;
+ } else if (sysfs_streq(buf, "off")) {
+ connector->force = DRM_FORCE_OFF;
+ } else
+ ret = -EINVAL;
+
+ if (ret == 0 && connector->force) {
+ if (connector->force == DRM_FORCE_ON ||
+ connector->force == DRM_FORCE_ON_DIGITAL)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+ if (connector->funcs->force)
+ connector->funcs->force(connector);
+ }
+
+ if (old_status != connector->status) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ connector->name,
+ old_status, connector->status);
+
+ dev->mode_config.delayed_event = true;
+ if (dev->mode_config.poll_enabled)
+ schedule_delayed_work(&dev->mode_config.output_poll_work,
+ 0);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+static ssize_t status_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_connector_status_name(status));
+ drm_get_connector_status_name(connector->status));
}
static ssize_t dpms_show(struct device *device,
@@ -339,7 +384,7 @@ static ssize_t select_subconnector_show(struct device *device,
drm_get_dvi_i_select_name((int)subconnector));
}
-static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RW(status);
static DEVICE_ATTR_RO(enabled);
static DEVICE_ATTR_RO(dpms);
static DEVICE_ATTR_RO(modes);
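With status now writable, connector forcing that previously required a video= parameter on the kernel command line can be changed at runtime, e.g. by writing one of detect, on, on-digital or off to /sys/class/drm/card0-HDMI-A-1/status (the connector name here is only an example).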
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 4a2c328959e5..aab49ee4ed40 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
+#include "drm_internal.h"
#include "drm_legacy.h"
struct drm_vma_entry {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 970046199608..1f7e33f59de6 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -28,6 +28,7 @@
#include <video/exynos7_decon.h>
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
@@ -41,32 +42,16 @@
#define WINDOWS_NR 2
-struct decon_win_data {
- unsigned int ovl_x;
- unsigned int ovl_y;
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- unsigned int pixel_format;
- dma_addr_t dma_addr;
- bool enabled;
- bool resume;
-};
-
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct clk *pclk;
struct clk *aclk;
struct clk *eclk;
struct clk *vclk;
void __iomem *regs;
- struct decon_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
bool i80_if;
@@ -296,59 +281,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void decon_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct decon_context *ctx = crtc->ctx;
- struct decon_win_data *win_data;
- int win, padding;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
-
- win_data = &ctx->win_data[win];
-
- padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
- win_data->offset_x = plane->fb_x;
- win_data->offset_y = plane->fb_y;
- win_data->fb_width = plane->fb_width + padding;
- win_data->fb_height = plane->fb_height;
- win_data->ovl_x = plane->crtc_x;
- win_data->ovl_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->dma_addr = plane->dma_addr[0];
- win_data->bpp = plane->bpp;
- win_data->pixel_format = plane->pixel_format;
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
{
- struct decon_win_data *win_data = &ctx->win_data[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
+ int padding;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
@@ -397,7 +339,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -407,7 +349,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+ if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
@@ -435,7 +378,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
* @protect: 1 to protect (disable updates)
*/
static void decon_shadow_protect_win(struct decon_context *ctx,
- int win, bool protect)
+ unsigned int win, bool protect)
{
u32 bits, val;
@@ -449,12 +392,12 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
writel(val, ctx->regs + SHADOWCON);
}
-static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.mode;
- struct decon_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
+ int padding;
unsigned long val, alpha;
unsigned int last_x;
unsigned int last_y;
@@ -462,17 +405,14 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
/* If suspended, enable this on resume */
if (ctx->suspended) {
- win_data->resume = true;
+ plane->resume = true;
return;
}
@@ -490,39 +430,41 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
decon_shadow_protect_win(ctx, win, true);
/* buffer start address */
- val = (unsigned long)win_data->dma_addr;
+ val = (unsigned long)plane->dma_addr[0];
writel(val, ctx->regs + VIDW_BUF_START(win));
+ padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+
/* buffer size */
- writel(win_data->fb_width, ctx->regs + VIDW_WHOLE_X(win));
- writel(win_data->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+ writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
+ writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
- writel(win_data->offset_x, ctx->regs + VIDW_OFFSET_X(win));
- writel(win_data->offset_y, ctx->regs + VIDW_OFFSET_Y(win));
+ writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
+ writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win));
DRM_DEBUG_KMS("start addr = 0x%lx\n",
- (unsigned long)win_data->dma_addr);
+ (unsigned long)val);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
+ plane->crtc_width, plane->crtc_height);
/*
* OSD position.
* In case the window layout goes of LCD layout, DECON fails.
*/
- if ((win_data->ovl_x + win_data->ovl_width) > mode->hdisplay)
- win_data->ovl_x = mode->hdisplay - win_data->ovl_width;
- if ((win_data->ovl_y + win_data->ovl_height) > mode->vdisplay)
- win_data->ovl_y = mode->vdisplay - win_data->ovl_height;
+ if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
+ plane->crtc_x = mode->hdisplay - plane->crtc_width;
+ if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
+ plane->crtc_y = mode->vdisplay - plane->crtc_height;
- val = VIDOSDxA_TOPLEFT_X(win_data->ovl_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->ovl_y);
+ val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = win_data->ovl_x + win_data->ovl_width;
+ last_x = plane->crtc_x + plane->crtc_width;
if (last_x)
last_x--;
- last_y = win_data->ovl_y + win_data->ovl_height;
+ last_y = plane->crtc_y + plane->crtc_height;
if (last_y)
last_y--;
@@ -531,7 +473,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->ovl_x, win_data->ovl_y, last_x, last_y);
+ plane->crtc_x, plane->crtc_y, last_x, last_y);
/* OSD alpha */
alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
@@ -565,27 +507,23 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
- win_data->enabled = true;
+ plane->enabled = true;
}
-static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct decon_context *ctx = crtc->ctx;
- struct decon_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
u32 val;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
if (ctx->suspended) {
/* do not resume this window*/
- win_data->resume = false;
+ plane->resume = false;
return;
}
@@ -604,42 +542,42 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
- win_data->enabled = false;
+ plane->enabled = false;
}
static void decon_window_suspend(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
+ if (plane->enabled)
decon_win_disable(ctx->crtc, i);
}
}
static void decon_window_resume(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
}
}
static void decon_apply(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ if (plane->enabled)
decon_win_commit(ctx->crtc, i);
else
decon_win_disable(ctx->crtc, i);
@@ -779,7 +717,6 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.wait_for_vblank = decon_wait_for_vblank,
- .win_mode_set = decon_win_mode_set,
.win_commit = decon_win_commit,
.win_disable = decon_win_disable,
};
@@ -818,6 +755,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ret = decon_ctx_initialize(ctx, drm_dev);
@@ -826,8 +766,18 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_LCD,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
&decon_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
decon_ctx_remove(ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index bf17a60b40ed..1dbfba58f909 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,10 +32,16 @@
#include <drm/bridge/ptn3460.h>
#include "exynos_dp_core.h"
+#include "exynos_drm_fimd.h"
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)
+static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
+{
+ return to_exynos_crtc(dp->encoder->crtc);
+}
+
static inline struct exynos_dp_device *
display_to_dp(struct exynos_drm_display *d)
{
@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
}
}
+ fimd_dp_clock_enable(dp_to_crtc(dp), true);
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
+ fimd_dp_clock_enable(dp_to_crtc(dp), false);
+
if (dp->panel) {
if (drm_panel_unprepare(dp->panel))
DRM_ERROR("failed to turnoff the panel\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 48ccab7fdf63..eb49195cec5c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -34,9 +34,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
if (mode > DRM_MODE_DPMS_ON) {
/* wait for the completion of page flip. */
if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
- !atomic_read(&exynos_crtc->pending_flip),
- HZ/20))
- atomic_set(&exynos_crtc->pending_flip, 0);
+ (exynos_crtc->event == NULL), HZ/20))
+ exynos_crtc->event = NULL;
drm_crtc_vblank_off(crtc);
}
@@ -164,11 +163,10 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_framebuffer *old_fb = crtc->primary->fb;
unsigned int crtc_w, crtc_h;
- int ret = -EINVAL;
+ int ret;
/* when the page flip is requested, crtc's dpms should be on */
if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
@@ -176,48 +174,49 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
+ if (!event)
+ return -EINVAL;
- if (event) {
- /*
- * the pipe from user always is 0 so we can set pipe number
- * of current owner to event.
- */
- event->pipe = exynos_crtc->pipe;
+ spin_lock_irq(&dev->event_lock);
+ if (exynos_crtc->event) {
+ ret = -EBUSY;
+ goto out;
+ }
- ret = drm_vblank_get(dev, exynos_crtc->pipe);
- if (ret) {
- DRM_DEBUG("failed to acquire vblank counter\n");
+ ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter\n");
+ goto out;
+ }
- goto out;
- }
+ exynos_crtc->event = event;
+ spin_unlock_irq(&dev->event_lock);
+ /*
+ * the pipe from user always is 0 so we can set pipe number
+ * of current owner to event.
+ */
+ event->pipe = exynos_crtc->pipe;
+
+ crtc->primary->fb = fb;
+ crtc_w = fb->width - crtc->x;
+ crtc_h = fb->height - crtc->y;
+ ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
+ crtc_w, crtc_h, crtc->x, crtc->y,
+ crtc_w, crtc_h);
+ if (ret) {
+ crtc->primary->fb = old_fb;
spin_lock_irq(&dev->event_lock);
- list_add_tail(&event->base.link,
- &dev_priv->pageflip_event_list);
- atomic_set(&exynos_crtc->pending_flip, 1);
+ exynos_crtc->event = NULL;
+ drm_vblank_put(dev, exynos_crtc->pipe);
spin_unlock_irq(&dev->event_lock);
-
- crtc->primary->fb = fb;
- crtc_w = fb->width - crtc->x;
- crtc_h = fb->height - crtc->y;
- ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
- crtc_w, crtc_h, crtc->x, crtc->y,
- crtc_w, crtc_h);
- if (ret) {
- crtc->primary->fb = old_fb;
-
- spin_lock_irq(&dev->event_lock);
- drm_vblank_put(dev, exynos_crtc->pipe);
- list_del(&event->base.link);
- atomic_set(&exynos_crtc->pending_flip, 0);
- spin_unlock_irq(&dev->event_lock);
-
- goto out;
- }
+ return ret;
}
+
+ return 0;
+
out:
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock_irq(&dev->event_lock);
return ret;
}
@@ -239,13 +238,13 @@ static struct drm_crtc_funcs exynos_crtc_funcs = {
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ struct drm_plane *plane,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
void *ctx)
{
struct exynos_drm_crtc *exynos_crtc;
- struct drm_plane *plane;
struct exynos_drm_private *private = drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
@@ -255,19 +254,12 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&exynos_crtc->pending_flip_queue);
- atomic_set(&exynos_crtc->pending_flip, 0);
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
exynos_crtc->pipe = pipe;
exynos_crtc->type = type;
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
- plane = exynos_plane_init(drm_dev, 1 << pipe,
- DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- goto err_plane;
- }
crtc = &exynos_crtc->base;
@@ -284,7 +276,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
err_crtc:
plane->funcs->destroy(plane);
-err_plane:
kfree(exynos_crtc);
return ERR_PTR(ret);
}
@@ -320,26 +311,20 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
{
struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
+ if (exynos_crtc->event) {
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (pipe != e->pipe)
- continue;
-
- list_del(&e->base.link);
- drm_send_vblank_event(dev, -1, e);
+ drm_send_vblank_event(dev, -1, exynos_crtc->event);
drm_vblank_put(dev, pipe);
- atomic_set(&exynos_crtc->pending_flip, 0);
wake_up(&exynos_crtc->pending_flip_queue);
}
+ exynos_crtc->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6258b800aab8..0ecd8fc45cff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -18,6 +18,7 @@
#include "exynos_drm_drv.h"
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ struct drm_plane *plane,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
@@ -27,12 +28,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
-void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
- struct exynos_drm_plane *plane);
-void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
-void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
-void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
-
/* This function gets pipe value to crtc device matched with out_type. */
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 90168d7cf66a..8ac465208eae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -55,13 +55,11 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
int ret;
- int nr;
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
if (!private)
return -ENOMEM;
- INIT_LIST_HEAD(&private->pageflip_event_list);
dev_set_drvdata(dev->dev, dev);
dev->dev_private = (void *)private;
@@ -81,19 +79,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_mode_config_init(dev);
- for (nr = 0; nr < MAX_PLANE; nr++) {
- struct drm_plane *plane;
- unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
-
- plane = exynos_plane_init(dev, possible_crtcs,
- DRM_PLANE_TYPE_OVERLAY);
- if (!IS_ERR(plane))
- continue;
-
- ret = PTR_ERR(plane);
- goto err_mode_config_cleanup;
- }
-
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
@@ -237,25 +222,13 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
if (!file->driver_priv)
return;
- /* Release all events not unhandled by page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
- list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
- base.link) {
- if (v->base.file_priv == file) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
- }
-
/* Release all events handled by page flip handler but not freed. */
list_for_each_entry_safe(e, et, &file->event_list, link) {
list_del(&e->link);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 9afd390d4674..e12ecb5d5d9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -21,7 +21,6 @@
#define MAX_CRTC 3
#define MAX_PLANE 5
#define MAX_FB_BUFFER 4
-#define DEFAULT_ZPOS -1
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc, base)
#define to_exynos_plane(x) container_of(x, struct exynos_drm_plane, base)
@@ -48,20 +47,22 @@ enum exynos_drm_output_type {
* Exynos drm common overlay structure.
*
* @base: plane object
- * @fb_x: offset x on a framebuffer to be displayed.
+ * @src_x: offset x on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed.
+ * @src_y: offset y on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
* @src_width: width of a partial image to be displayed from framebuffer.
* @src_height: height of a partial image to be displayed from framebuffer.
+ * @fb_width: width of a framebuffer.
+ * @fb_height: height of a framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_width: window width to be displayed (hardware screen).
* @crtc_height: window height to be displayed (hardware screen).
* @mode_width: width of screen mode.
* @mode_height: height of screen mode.
+ * @h_ratio: horizontal scaling ratio, 16.16 fixed point
+ * @v_ratio: vertical scaling ratio, 16.16 fixed point
* @refresh: refresh rate.
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
@@ -78,6 +79,7 @@ enum exynos_drm_output_type {
* @transparency: transparency on or off.
* @activated: activated or not.
* @enabled: enabled or not.
+ * @resume: whether the window should be re-enabled on resume.
*
* this structure is common to exynos SoC and its contents would be copied
* to hardware specific overlay info.
@@ -85,25 +87,27 @@ enum exynos_drm_output_type {
struct exynos_drm_plane {
struct drm_plane base;
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int fb_width;
- unsigned int fb_height;
+ unsigned int src_x;
+ unsigned int src_y;
unsigned int src_width;
unsigned int src_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_width;
unsigned int crtc_height;
unsigned int mode_width;
unsigned int mode_height;
+ unsigned int h_ratio;
+ unsigned int v_ratio;
unsigned int refresh;
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- int zpos;
+ unsigned int zpos;
unsigned int index_color;
bool default_win:1;
@@ -112,6 +116,7 @@ struct exynos_drm_plane {
bool transparency:1;
bool activated:1;
bool enabled:1;
+ bool resume:1;
};
/*
@@ -172,9 +177,7 @@ struct exynos_drm_display {
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @wait_for_vblank: wait for vblank interrupt to make sure that
* hardware overlay is updated.
- * @win_mode_set: copy drm overlay info to hw specific overlay info.
* @win_commit: apply hardware specific overlay data to registers.
- * @win_enable: enable hardware specific overlay.
* @win_disable: disable hardware specific overlay.
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
@@ -189,11 +192,8 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
- void (*win_mode_set)(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane);
- void (*win_commit)(struct exynos_drm_crtc *crtc, int zpos);
- void (*win_enable)(struct exynos_drm_crtc *crtc, int zpos);
- void (*win_disable)(struct exynos_drm_crtc *crtc, int zpos);
+ void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+ void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
void (*te_handler)(struct exynos_drm_crtc *crtc);
};
@@ -210,6 +210,7 @@ struct exynos_drm_crtc_ops {
* we can refer to the crtc to current hardware interrupt occurred through
* this pipe value.
* @dpms: store the crtc dpms value
+ * @event: vblank event that is currently queued for flip
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the crtc's implementation specific context
*/
@@ -219,7 +220,7 @@ struct exynos_drm_crtc {
unsigned int pipe;
unsigned int dpms;
wait_queue_head_t pending_flip_queue;
- atomic_t pending_flip;
+ struct drm_pending_vblank_event *event;
struct exynos_drm_crtc_ops *ops;
void *ctx;
};
@@ -249,9 +250,6 @@ struct drm_exynos_file_private {
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
- /* list head for new event to be added. */
- struct list_head pageflip_event_list;
-
/*
* created crtc object would be contained at this array and
* this array is used to be aware of which crtc did it request vblank.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 05fe93dc57a8..04927153bf38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1473,12 +1473,6 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
return 0;
}
-static int exynos_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static struct drm_encoder *
exynos_dsi_best_encoder(struct drm_connector *connector)
{
@@ -1489,7 +1483,6 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.get_modes = exynos_dsi_get_modes,
- .mode_valid = exynos_dsi_mode_valid,
.best_encoder = exynos_dsi_best_encoder,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d346d1e6eda0..929cb03a8eab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -151,10 +151,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
- return ERR_PTR(-EINVAL);
- }
+ if (ret < 0)
+ return ERR_PTR(ret);
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb)
@@ -250,10 +248,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
+ if (ret < 0)
goto err_unreference;
- }
}
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 84f8dfe1c5ec..e71e331f0188 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -76,6 +76,7 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes,
struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
@@ -85,7 +86,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
unsigned long offset;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
/* RGB formats use only one buffer */
buffer = exynos_drm_fb_buffer(fb, 0);
@@ -189,7 +190,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto err_destroy_framebuffer;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb);
+ ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
if (ret < 0)
goto err_dealloc_cmap;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33a10ce967ea..9819fa6a9e2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,9 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
+#include "exynos_drm_fimd.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -54,6 +56,9 @@
/* size control register for hardware windows 1 ~ 2. */
#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
+#define VIDWnALPHA0(win) (VIDW_ALPHA + 0x00 + (win) * 8)
+#define VIDWnALPHA1(win) (VIDW_ALPHA + 0x04 + (win) * 8)
+
#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
@@ -140,32 +145,15 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
.has_vtsel = 1,
};
-struct fimd_win_data {
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int fb_pitch;
- unsigned int bpp;
- unsigned int pixel_format;
- dma_addr_t dma_addr;
- unsigned int buf_offsize;
- unsigned int line_size; /* bytes */
- bool enabled;
- bool resume;
-};
-
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
struct regmap *sysreg;
- struct fimd_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
u32 vidcon0;
@@ -502,59 +490,9 @@ static void fimd_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win;
- unsigned long offset;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- offset = plane->fb_x * (plane->bpp >> 3);
- offset += plane->fb_y * plane->pitch;
-
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
-
- win_data = &ctx->win_data[win];
-
- win_data->offset_x = plane->crtc_x;
- win_data->offset_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->fb_pitch = plane->pitch;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->dma_addr = plane->dma_addr[0] + offset;
- win_data->bpp = plane->bpp;
- win_data->pixel_format = plane->pixel_format;
- win_data->buf_offsize =
- plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
- win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
{
- struct fimd_win_data *win_data = &ctx->win_data[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
val = WINCONx_ENWIN;
@@ -564,11 +502,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* So the request format is ARGB8888 then change it to XRGB8888.
*/
if (ctx->driver_data->has_limited_fmt && !win) {
- if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
- win_data->pixel_format = DRM_FORMAT_XRGB8888;
+ if (plane->pixel_format == DRM_FORMAT_ARGB8888)
+ plane->pixel_format = DRM_FORMAT_XRGB8888;
}
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
@@ -604,7 +542,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -614,12 +552,30 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
writel(val, ctx->regs + WINCON(win));
+
+ /* hardware window 0 doesn't support alpha channel. */
+ if (win != 0) {
+ /* OSD alpha */
+ val = VIDISD14C_ALPHA0_R(0xf) |
+ VIDISD14C_ALPHA0_G(0xf) |
+ VIDISD14C_ALPHA0_B(0xf) |
+ VIDISD14C_ALPHA1_R(0xf) |
+ VIDISD14C_ALPHA1_G(0xf) |
+ VIDISD14C_ALPHA1_B(0xf);
+
+ writel(val, ctx->regs + VIDOSD_C(win));
+
+ val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) |
+ VIDW_ALPHA_B(0xf);
+ writel(val, ctx->regs + VIDWnALPHA0(win));
+ writel(val, ctx->regs + VIDWnALPHA1(win));
+ }
}
static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
@@ -642,7 +598,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
* @protect: 1 to protect (disable updates)
*/
static void fimd_shadow_protect_win(struct fimd_context *ctx,
- int win, bool protect)
+ unsigned int win, bool protect)
{
u32 reg, bits, val;
@@ -662,29 +618,25 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
writel(val, ctx->regs + reg);
}
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win = zpos;
- unsigned long val, alpha, size;
- unsigned int last_x;
- unsigned int last_y;
+ struct exynos_drm_plane *plane;
+ dma_addr_t dma_addr;
+ unsigned long val, size, offset;
+ unsigned int last_x, last_y, buf_offsize, line_size;
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
/* If suspended, enable this on resume */
if (ctx->suspended) {
- win_data->resume = true;
+ plane->resume = true;
return;
}
@@ -701,38 +653,45 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
/* protect windows */
fimd_shadow_protect_win(ctx, win, true);
+
+ offset = plane->src_x * (plane->bpp >> 3);
+ offset += plane->src_y * plane->pitch;
+
/* buffer start address */
- val = (unsigned long)win_data->dma_addr;
+ dma_addr = plane->dma_addr[0] + offset;
+ val = (unsigned long)dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
- size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
- val = (unsigned long)(win_data->dma_addr + size);
+ size = plane->pitch * plane->crtc_height;
+ val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->dma_addr, val, size);
+ (unsigned long)dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
+ plane->crtc_width, plane->crtc_height);
/* buffer size */
- val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
- VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
+ buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
+ line_size = plane->crtc_width * (plane->bpp >> 3);
+ val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
- val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
- VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
+ val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y(plane->crtc_y) |
+ VIDOSDxA_TOPLEFT_X_E(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = win_data->offset_x + win_data->ovl_width;
+ last_x = plane->crtc_x + plane->crtc_width;
if (last_x)
last_x--;
- last_y = win_data->offset_y + win_data->ovl_height;
+ last_y = plane->crtc_y + plane->crtc_height;
if (last_y)
last_y--;
@@ -742,24 +701,14 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y, last_x, last_y);
-
- /* hardware window 0 doesn't support alpha channel. */
- if (win != 0) {
- /* OSD alpha */
- alpha = VIDISD14C_ALPHA1_R(0xf) |
- VIDISD14C_ALPHA1_G(0xf) |
- VIDISD14C_ALPHA1_B(0xf);
-
- writel(alpha, ctx->regs + VIDOSD_C(win));
- }
+ plane->crtc_x, plane->crtc_y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C(win);
- val = win_data->ovl_width * win_data->ovl_height;
+ val = plane->crtc_width * plane->crtc_height;
writel(val, ctx->regs + offset);
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
@@ -779,29 +728,25 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
/* Enable DMA channel and unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- win_data->enabled = true;
+ plane->enabled = true;
if (ctx->i80_if)
atomic_set(&ctx->win_updated, 1);
}
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win = zpos;
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
+ struct exynos_drm_plane *plane;
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
if (ctx->suspended) {
/* do not resume this window*/
- win_data->resume = false;
+ plane->resume = false;
return;
}
@@ -816,42 +761,42 @@ static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos)
/* unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- win_data->enabled = false;
+ plane->enabled = false;
}
static void fimd_window_suspend(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
+ if (plane->enabled)
fimd_win_disable(ctx->crtc, i);
}
}
static void fimd_window_resume(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
}
}
static void fimd_apply(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ if (plane->enabled)
fimd_win_commit(ctx->crtc, i);
else
fimd_win_disable(ctx->crtc, i);
@@ -1008,7 +953,6 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.wait_for_vblank = fimd_wait_for_vblank,
- .win_mode_set = fimd_win_mode_set,
.win_commit = fimd_win_commit,
.win_disable = fimd_win_disable,
.te_handler = fimd_te_handler,
@@ -1054,14 +998,29 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ctx->drm_dev = drm_dev;
ctx->pipe = priv->pipe++;
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_LCD,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
&fimd_crtc_ops, ctx);
+ if (IS_ERR(ctx->crtc))
+ return PTR_ERR(ctx->crtc);
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
@@ -1233,6 +1192,24 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}
+void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+{
+ struct fimd_context *ctx = crtc->ctx;
+ u32 val;
+
+ /*
+ * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
+ * clock. On these SoCs the bootloader may enable it, but any
+ * power domain off/on will reset it to the disabled state.
+ */
+ if (ctx->driver_data != &exynos5_fimd_driver_data)
+ return;
+
+ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+ writel(val, ctx->regs + DP_MIE_CLKCON);
+}
+EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
+
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
new file mode 100644
index 000000000000..b4fcaa568456
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMD_H_
+#define _EXYNOS_DRM_FIMD_H_
+
+extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
+
+#endif /* _EXYNOS_DRM_FIMD_H_ */
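
fimd_dp_clock_enable() is exported so the DP output path can gate the FIMD DP/MIE clock around link bring-up and tear-down. A hedged sketch of the call pattern; the stub body and the dp_poweron()/dp_poweroff() helpers below are stand-ins for illustration, not the real DP driver:

#include <stdbool.h>
#include <stdio.h>

struct exynos_drm_crtc;	/* opaque stand-in */

/* Stub standing in for the real export in exynos_drm_fimd.c. */
static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
{
	(void)crtc;
	printf("DP/MIE clock %s\n", enable ? "enabled" : "disabled");
}

static void dp_poweron(struct exynos_drm_crtc *crtc)
{
	fimd_dp_clock_enable(crtc, true);	/* no-op except on Exynos5 FIMD */
	/* ...bring up the DP link... */
}

static void dp_poweroff(struct exynos_drm_crtc *crtc)
{
	/* ...tear down the DP link... */
	fimd_dp_clock_enable(crtc, false);
}

int main(void)
{
	dp_poweron(NULL);
	dp_poweroff(NULL);
	return 0;
}
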
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d5ad17dfc24d..b7f1cbc46cc2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -476,6 +476,45 @@ err_clear:
return ret;
}
+static int ipp_validate_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_config *ipp_cfg;
+ unsigned int num_plane;
+ unsigned long min_size, size;
+ unsigned int cpp;
+ int i;
+
+ /* The property id should already be verified */
+ ipp_cfg = &c_node->property.config[m_node->ops_id];
+ num_plane = drm_format_num_planes(ipp_cfg->fmt);
+
+ /*
+ * This is a rather simplified validation of a memory node.
+ * It basically verifies the provided gem object handles
+ * and the buffer sizes with respect to the current configuration.
+ * It is not exhaustive, but it is sufficient here.
+ */
+ for (i = 0; i < num_plane; ++i) {
+ if (!m_node->buf_info.handles[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ cpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
+ min_size = ipp_cfg->sz.hsize * ipp_cfg->sz.vsize * cpp;
+ size = exynos_drm_gem_get_size(drm_dev,
+ m_node->buf_info.handles[i],
+ c_node->filp);
+ if (min_size > size) {
+ DRM_ERROR("invalid size for plane %d\n", i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int ipp_put_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
@@ -552,6 +591,11 @@ static struct drm_exynos_ipp_mem_node
}
mutex_lock(&c_node->mem_lock);
+ if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+ }
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
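
The size check in ipp_validate_mem_node() is plain per-plane arithmetic: a buffer must hold at least hsize * vsize pixels at that plane's bytes per pixel (what drm_format_plane_cpp() returns). A standalone sketch of the computation with illustrative values:

#include <stdio.h>

/* Minimum bytes for one plane: width * height * bytes-per-pixel (cpp).
 * A deliberately rough lower bound, like the kernel check above;
 * subsampled chroma planes would really need smaller dimensions. */
static unsigned long min_plane_size(unsigned int hsize, unsigned int vsize,
				    unsigned int cpp)
{
	return (unsigned long)hsize * vsize * cpp;
}

int main(void)
{
	/* e.g. 1920x1080 XRGB8888 (4 bytes/pixel) -> 8294400 bytes */
	printf("%lu\n", min_plane_size(1920, 1080, 4));
	return 0;
}
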
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 8ad5b7294eb4..13ea3349363b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -92,7 +92,6 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
unsigned int actual_w;
unsigned int actual_h;
@@ -111,13 +110,17 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_y = 0;
}
+ /* set ratio */
+ exynos_plane->h_ratio = (src_w << 16) / crtc_w;
+ exynos_plane->v_ratio = (src_h << 16) / crtc_h;
+
/* set drm framebuffer data. */
- exynos_plane->fb_x = src_x;
- exynos_plane->fb_y = src_y;
+ exynos_plane->src_x = src_x;
+ exynos_plane->src_y = src_y;
+ exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
+ exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
exynos_plane->fb_width = fb->width;
exynos_plane->fb_height = fb->height;
- exynos_plane->src_width = src_w;
- exynos_plane->src_height = src_h;
exynos_plane->bpp = fb->bits_per_pixel;
exynos_plane->pitch = fb->pitches[0];
exynos_plane->pixel_format = fb->pixel_format;
@@ -139,9 +142,6 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
exynos_plane->crtc_width, exynos_plane->crtc_height);
plane->crtc = crtc;
-
- if (exynos_crtc->ops->win_mode_set)
- exynos_crtc->ops->win_mode_set(exynos_crtc, exynos_plane);
}
int
@@ -182,39 +182,14 @@ static int exynos_disable_plane(struct drm_plane *plane)
return 0;
}
-static void exynos_plane_destroy(struct drm_plane *plane)
-{
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
-
- exynos_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(exynos_plane);
-}
-
-static int exynos_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_private *dev_priv = dev->dev_private;
-
- if (property == dev_priv->plane_zpos_property) {
- exynos_plane->zpos = val;
- return 0;
- }
-
- return -EINVAL;
-}
-
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = exynos_update_plane,
.disable_plane = exynos_disable_plane,
- .destroy = exynos_plane_destroy,
- .set_property = exynos_plane_set_property,
+ .destroy = drm_plane_cleanup,
};
-static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
+static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_device *dev = plane->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
@@ -222,41 +197,36 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
prop = dev_priv->plane_zpos_property;
if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos", 0,
- MAX_PLANE - 1);
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
+ "zpos", 0, MAX_PLANE - 1);
if (!prop)
return;
dev_priv->plane_zpos_property = prop;
}
- drm_object_attach_property(&plane->base, prop, 0);
+ drm_object_attach_property(&plane->base, prop, zpos);
}
-struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned long possible_crtcs,
- enum drm_plane_type type)
+int exynos_plane_init(struct drm_device *dev,
+ struct exynos_drm_plane *exynos_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos)
{
- struct exynos_drm_plane *exynos_plane;
int err;
- exynos_plane = kzalloc(sizeof(struct exynos_drm_plane), GFP_KERNEL);
- if (!exynos_plane)
- return ERR_PTR(-ENOMEM);
-
err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats,
ARRAY_SIZE(formats), type);
if (err) {
DRM_ERROR("failed to initialize plane\n");
- kfree(exynos_plane);
- return ERR_PTR(err);
+ return err;
}
- if (type == DRM_PLANE_TYPE_PRIMARY)
- exynos_plane->zpos = DEFAULT_ZPOS;
- else
- exynos_plane_attach_zpos_property(&exynos_plane->base);
+ exynos_plane->zpos = zpos;
- return &exynos_plane->base;
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ exynos_plane_attach_zpos_property(&exynos_plane->base, zpos);
+
+ return 0;
}
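
exynos_plane_mode_set() now keeps the scaling ratios in 16.16 fixed point: ratio = (src << 16) / dst, and a destination-space length is mapped back to source space with (len * ratio) >> 16. A standalone check of that arithmetic:

#include <stdio.h>

/* 16.16 fixed point: the high 16 bits hold the integer part. */
static unsigned int fixed_ratio(unsigned int src, unsigned int dst)
{
	return (src << 16) / dst;	/* caller must ensure dst != 0 */
}

int main(void)
{
	/* 1920 source pixels squeezed into 960 crtc pixels -> ratio 2.0 */
	unsigned int h_ratio = fixed_ratio(1920, 960);	/* 0x20000 */
	unsigned int src_w = (960 * h_ratio) >> 16;	/* back to 1920 */

	printf("h_ratio = 0x%x, src_w = %u\n", h_ratio, src_w);
	return 0;
}
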
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 9d3c374e7b3e..f360590d1412 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -20,6 +20,7 @@ int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
-struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned long possible_crtcs,
- enum drm_plane_type type);
+int exynos_plane_init(struct drm_device *dev,
+ struct exynos_drm_plane *exynos_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b886972b5888..27e84ec21694 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -23,6 +23,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_vidi.h"
@@ -32,20 +33,6 @@
#define ctx_from_connector(c) container_of(c, struct vidi_context, \
connector)
-struct vidi_win_data {
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- dma_addr_t dma_addr;
- unsigned int buf_offsize;
- unsigned int line_size; /* bytes */
- bool enabled;
-};
-
struct vidi_context {
struct exynos_drm_display display;
struct platform_device *pdev;
@@ -53,7 +40,7 @@ struct vidi_context {
struct exynos_drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
- struct vidi_win_data win_data[WINDOWS_NR];
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
unsigned int default_win;
@@ -97,19 +84,6 @@ static const char fake_edid_info[] = {
0x00, 0x00, 0x00, 0x06
};
-static void vidi_apply(struct vidi_context *ctx)
-{
- struct exynos_drm_crtc_ops *crtc_ops = ctx->crtc->ops;
- struct vidi_win_data *win_data;
- int i;
-
- for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled && (crtc_ops && crtc_ops->win_commit))
- crtc_ops->win_commit(ctx->crtc, i);
- }
-}
-
static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -143,104 +117,46 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
ctx->vblank_on = false;
}
-static void vidi_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win;
- unsigned long offset;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- offset = plane->fb_x * (plane->bpp >> 3);
- offset += plane->fb_y * plane->pitch;
-
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
-
- win_data = &ctx->win_data[win];
-
- win_data->offset_x = plane->crtc_x;
- win_data->offset_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->dma_addr = plane->dma_addr[0] + offset;
- win_data->bpp = plane->bpp;
- win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
- (plane->bpp >> 3);
- win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
-
- /*
- * some parts of win_data should be transferred to user side
- * through specific ioctl.
- */
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
- win_data->enabled = true;
+ plane->enabled = true;
- DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
if (ctx->vblank_on)
schedule_work(&ctx->work);
}
-static void vidi_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void vidi_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win = zpos;
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
+ struct exynos_drm_plane *plane;
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
- win_data->enabled = false;
+ plane = &ctx->planes[win];
+ plane->enabled = false;
/* TODO. */
}
static int vidi_power_on(struct vidi_context *ctx, bool enable)
{
+ struct exynos_drm_plane *plane;
+ int i;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
if (enable != false && enable != true)
@@ -253,7 +169,11 @@ static int vidi_power_on(struct vidi_context *ctx, bool enable)
if (test_and_clear_bit(0, &ctx->irq_flags))
vidi_enable_vblank(ctx->crtc);
- vidi_apply(ctx);
+ for (i = 0; i < WINDOWS_NR; i++) {
+ plane = &ctx->planes[i];
+ if (plane->enabled)
+ vidi_win_commit(ctx->crtc, i);
+ }
} else {
ctx->suspended = true;
}
@@ -301,7 +221,6 @@ static struct exynos_drm_crtc_ops vidi_crtc_ops = {
.dpms = vidi_dpms,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
- .win_mode_set = vidi_win_mode_set,
.win_commit = vidi_win_commit,
.win_disable = vidi_win_disable,
};
@@ -543,12 +462,25 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
vidi_ctx_initialize(ctx, drm_dev);
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_VIDI,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI,
&vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
DRM_ERROR("failed to create crtc.\n");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 229b3613c60b..5eba971f394a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2007,7 +2007,7 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
m->hdisplay, m->vdisplay,
m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
- "INTERLACED" : "PROGERESSIVE");
+ "INTERLACED" : "PROGRESSIVE");
/* preserve mode information for later use. */
drm_mode_copy(&hdata->current_mode, mode);
@@ -2101,7 +2101,7 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_encoder *encoder = hdata->encoder;
struct drm_crtc *crtc = encoder->crtc;
- struct drm_crtc_helper_funcs *funcs = NULL;
+ const struct drm_crtc_helper_funcs *funcs = NULL;
DRM_DEBUG_KMS("mode %d\n", mode);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 2e3bc57ea50e..fbec750574e6 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,35 +37,13 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
#include "exynos_mixer.h"
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
-struct hdmi_win_data {
- dma_addr_t dma_addr;
- dma_addr_t chroma_dma_addr;
- uint32_t pixel_format;
- unsigned int bpp;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_width;
- unsigned int crtc_height;
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int fb_width;
- unsigned int fb_pitch;
- unsigned int fb_height;
- unsigned int src_width;
- unsigned int src_height;
- unsigned int mode_width;
- unsigned int mode_height;
- unsigned int scan_flags;
- bool enabled;
- bool resume;
-};
-
struct mixer_resources {
int irq;
void __iomem *mixer_regs;
@@ -90,6 +68,7 @@ struct mixer_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[MIXER_WIN_NR];
int pipe;
bool interlace;
bool powered;
@@ -99,7 +78,6 @@ struct mixer_context {
struct mutex mixer_mutex;
struct mixer_resources mixer_res;
- struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -289,7 +267,7 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
/* choosing between interlace and progressive mode */
val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
- MXR_CFG_SCAN_PROGRASSIVE);
+ MXR_CFG_SCAN_PROGRESSIVE);
if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
/* choosing between proper HD and SD mode */
@@ -403,17 +381,16 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- struct hdmi_win_data *win_data;
- unsigned int x_ratio, y_ratio;
+ struct exynos_drm_plane *plane;
unsigned int buf_num = 1;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
bool crcb_mode = false;
u32 val;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_NV12:
crcb_mode = false;
buf_num = 2;
@@ -421,35 +398,31 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
/* TODO: single buffer format NV12, NV21 */
default:
/* ignore pixel format at disable time */
- if (!win_data->dma_addr)
+ if (!plane->dma_addr[0])
break;
DRM_ERROR("pixel format for vp is wrong [%d].\n",
- win_data->pixel_format);
+ plane->pixel_format);
return;
}
- /* scaling feature: (src << 16) / dst */
- x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
- y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
-
if (buf_num == 2) {
- luma_addr[0] = win_data->dma_addr;
- chroma_addr[0] = win_data->chroma_dma_addr;
+ luma_addr[0] = plane->dma_addr[0];
+ chroma_addr[0] = plane->dma_addr[1];
} else {
- luma_addr[0] = win_data->dma_addr;
- chroma_addr[0] = win_data->dma_addr
- + (win_data->fb_pitch * win_data->fb_height);
+ luma_addr[0] = plane->dma_addr[0];
+ chroma_addr[0] = plane->dma_addr[0]
+ + (plane->pitch * plane->fb_height);
}
- if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+ if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
ctx->interlace = true;
if (tiled_mode) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
- chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
+ luma_addr[1] = luma_addr[0] + plane->pitch;
+ chroma_addr[1] = chroma_addr[0] + plane->pitch;
}
} else {
ctx->interlace = false;
@@ -470,30 +443,30 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
- VP_IMG_VSIZE(win_data->fb_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
+ VP_IMG_VSIZE(plane->fb_height));
/* chroma height has to reduced by 2 to avoid chroma distorions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
- VP_IMG_VSIZE(win_data->fb_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
+ VP_IMG_VSIZE(plane->fb_height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
- vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
+ vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
+ vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
vp_reg_write(res, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(win_data->fb_x));
- vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
+ VP_SRC_H_POSITION_VAL(plane->src_x));
+ vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
- vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
- vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
+ vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+ vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
- vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
- vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+ vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
}
- vp_reg_write(res, VP_H_RATIO, x_ratio);
- vp_reg_write(res, VP_V_RATIO, y_ratio);
+ vp_reg_write(res, VP_H_RATIO, plane->h_ratio);
+ vp_reg_write(res, VP_V_RATIO, plane->v_ratio);
vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
@@ -503,8 +476,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, win_data->mode_height);
- mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_scan(ctx, plane->mode_height);
+ mixer_cfg_rgb_fmt(ctx, plane->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
@@ -521,25 +494,49 @@ static void mixer_layer_update(struct mixer_context *ctx)
mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
+static int mixer_setup_scale(const struct exynos_drm_plane *plane,
+ unsigned int *x_ratio, unsigned int *y_ratio)
+{
+ if (plane->crtc_width != plane->src_width) {
+ if (plane->crtc_width == 2 * plane->src_width)
+ *x_ratio = 1;
+ else
+ goto fail;
+ }
+
+ if (plane->crtc_height != plane->src_height) {
+ if (plane->crtc_height == 2 * plane->src_height)
+ *y_ratio = 1;
+ else
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ DRM_DEBUG_KMS("only 2x width/height scaling of plane supported\n");
+ return -ENOTSUPP;
+}
+
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- struct hdmi_win_data *win_data;
- unsigned int x_ratio, y_ratio;
+ struct exynos_drm_plane *plane;
+ unsigned int x_ratio = 0, y_ratio = 0;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
#define RGB565 4
#define ARGB1555 5
#define ARGB4444 6
#define ARGB8888 7
- switch (win_data->bpp) {
+ switch (plane->bpp) {
case 16:
fmt = ARGB4444;
break;
@@ -550,21 +547,21 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
fmt = ARGB8888;
}
- /* 2x scaling feature */
- x_ratio = 0;
- y_ratio = 0;
+ /* check if mixer supports requested scaling setup */
+ if (mixer_setup_scale(plane, &x_ratio, &y_ratio))
+ return;
- dst_x_offset = win_data->crtc_x;
- dst_y_offset = win_data->crtc_y;
+ dst_x_offset = plane->crtc_x;
+ dst_y_offset = plane->crtc_y;
/* converting dma address base and source offset */
- dma_addr = win_data->dma_addr
- + (win_data->fb_x * win_data->bpp >> 3)
- + (win_data->fb_y * win_data->fb_pitch);
+ dma_addr = plane->dma_addr[0]
+ + (plane->src_x * plane->bpp >> 3)
+ + (plane->src_y * plane->pitch);
src_x_offset = 0;
src_y_offset = 0;
- if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+ if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
ctx->interlace = true;
else
ctx->interlace = false;
@@ -578,18 +575,18 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
- win_data->fb_pitch / (win_data->bpp >> 3));
+ plane->pitch / (plane->bpp >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
win == MIXER_DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(win_data->mode_height);
- val |= MXR_MXR_RES_WIDTH(win_data->mode_width);
+ val = MXR_MXR_RES_HEIGHT(plane->mode_height);
+ val |= MXR_MXR_RES_WIDTH(plane->mode_width);
mixer_reg_write(res, MXR_RESOLUTION, val);
}
- val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
- val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
+ val = MXR_GRP_WH_WIDTH(plane->src_width);
+ val |= MXR_GRP_WH_HEIGHT(plane->src_height);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -607,8 +604,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, win_data->mode_height);
- mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_scan(ctx, plane->mode_height);
+ mixer_cfg_rgb_fmt(ctx, plane->mode_height);
mixer_cfg_layer(ctx, win, true);
/* layer update mandatory for mixer 16.0.33.0 */
@@ -920,63 +917,9 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct mixer_context *mixer_ctx = crtc->ctx;
- struct hdmi_win_data *win_data;
- int win;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
- plane->fb_width, plane->fb_height,
- plane->fb_x, plane->fb_y,
- plane->crtc_width, plane->crtc_height,
- plane->crtc_x, plane->crtc_y);
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win >= MIXER_WIN_NR) {
- DRM_ERROR("mixer window[%d] is wrong\n", win);
- return;
- }
-
- win_data = &mixer_ctx->win_data[win];
-
- win_data->dma_addr = plane->dma_addr[0];
- win_data->chroma_dma_addr = plane->dma_addr[1];
- win_data->pixel_format = plane->pixel_format;
- win_data->bpp = plane->bpp;
-
- win_data->crtc_x = plane->crtc_x;
- win_data->crtc_y = plane->crtc_y;
- win_data->crtc_width = plane->crtc_width;
- win_data->crtc_height = plane->crtc_height;
-
- win_data->fb_x = plane->fb_x;
- win_data->fb_y = plane->fb_y;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->fb_pitch = plane->pitch;
- win_data->src_width = plane->src_width;
- win_data->src_height = plane->src_height;
-
- win_data->mode_width = plane->mode_width;
- win_data->mode_height = plane->mode_height;
-
- win_data->scan_flags = plane->scan_flag;
-}
-
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -992,14 +935,13 @@ static void mixer_win_commit(struct exynos_drm_crtc *crtc, int zpos)
else
mixer_graph_buffer(mixer_ctx, win);
- mixer_ctx->win_data[win].enabled = true;
+ mixer_ctx->planes[win].enabled = true;
}
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
- int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
unsigned long flags;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -1007,7 +949,7 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
mutex_unlock(&mixer_ctx->mixer_mutex);
- mixer_ctx->win_data[win].resume = false;
+ mixer_ctx->planes[win].resume = false;
return;
}
mutex_unlock(&mixer_ctx->mixer_mutex);
@@ -1020,7 +962,7 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
- mixer_ctx->win_data[win].enabled = false;
+ mixer_ctx->planes[win].enabled = false;
}
static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
@@ -1057,12 +999,12 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
static void mixer_window_suspend(struct mixer_context *ctx)
{
- struct hdmi_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
mixer_win_disable(ctx->crtc, i);
}
mixer_wait_for_vblank(ctx->crtc);
@@ -1070,14 +1012,14 @@ static void mixer_window_suspend(struct mixer_context *ctx)
static void mixer_window_resume(struct mixer_context *ctx)
{
- struct hdmi_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
+ if (plane->enabled)
mixer_win_commit(ctx->crtc, i);
}
}
@@ -1189,7 +1131,6 @@ static struct exynos_drm_crtc_ops mixer_crtc_ops = {
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.wait_for_vblank = mixer_wait_for_vblank,
- .win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
};
@@ -1253,15 +1194,28 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ret = mixer_initialize(ctx, drm_dev);
if (ret)
return ret;
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_HDMI,
- &mixer_crtc_ops, ctx);
+ for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+ type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[MIXER_DEFAULT_WIN];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI,
+ &mixer_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
mixer_ctx_remove(ctx);
ret = PTR_ERR(ctx->crtc);
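
mixer_setup_scale() above admits exactly two geometries per axis: 1:1 (hardware ratio field 0) or 2x upscaling (ratio field 1); everything else is rejected. The decision logic as a standalone sketch (userspace errno used in place of the kernel's -ENOTSUPP):

#include <errno.h>
#include <stdio.h>

/* Mirrors mixer_setup_scale(): dst == src -> ratio 0, dst == 2 * src ->
 * ratio 1, anything else is unsupported by the mixer graphics layer. */
static int scale_ratio(unsigned int src, unsigned int dst, unsigned int *ratio)
{
	if (dst == src)
		*ratio = 0;
	else if (dst == 2 * src)
		*ratio = 1;
	else
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	unsigned int r;

	printf("%d\n", scale_ratio(640, 1280, &r));	/* 0, r == 1 */
	printf("%d\n", scale_ratio(640, 641, &r));	/* negative: rejected */
	return 0;
}
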
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 5f32e1a29411..ac60260c2389 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -101,7 +101,7 @@
#define MXR_CFG_GRP0_ENABLE (1 << 4)
#define MXR_CFG_VP_ENABLE (1 << 3)
#define MXR_CFG_SCAN_INTERLACE (0 << 2)
-#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_PROGRESSIVE (1 << 2)
#define MXR_CFG_SCAN_NTSC (0 << 1)
#define MXR_CFG_SCAN_PAL (1 << 1)
#define MXR_CFG_SCAN_SD (0 << 0)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 66727328832d..7d47b3d5cc0d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -823,7 +823,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs =
+ const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 4268bf210034..6b1d3340ba14 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -195,7 +195,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
encoder->crtc->x, encoder->crtc->y, encoder->crtc->primary->fb))
return -1;
} else {
- struct drm_encoder_helper_funcs *helpers
+ const struct drm_encoder_helper_funcs *helpers
= encoder->helper_private;
helpers->mode_set(encoder, &crtc->saved_mode,
&crtc->saved_adjusted_mode);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 0b770396548c..211069b2b951 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -505,7 +505,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
else
gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS") && encoder) {
- struct drm_encoder_helper_funcs *helpers =
+ const struct drm_encoder_helper_funcs *helpers =
encoder->helper_private;
helpers->dpms(encoder, value);
}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 9bb9bddd881a..001b450b27b3 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -501,20 +501,20 @@ bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
void gma_crtc_prepare(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
void gma_crtc_commit(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
void gma_crtc_disable(struct drm_crtc *crtc)
{
struct gtt_range *gt;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -656,7 +656,7 @@ void gma_crtc_restore(struct drm_crtc *crtc)
void gma_encoder_prepare(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *encoder_funcs =
+ const struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
/* lvds has its own version of prepare see psb_intel_lvds_prepare */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -664,7 +664,7 @@ void gma_encoder_prepare(struct drm_encoder *encoder)
void gma_encoder_commit(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *encoder_funcs =
+ const struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
/* lvds has its own version of commit see psb_intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index abf2248da61e..89f705c3a5eb 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -290,7 +290,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
encoder->crtc->primary->fb))
goto set_prop_error;
} else {
- struct drm_encoder_helper_funcs *funcs =
+ const struct drm_encoder_helper_funcs *funcs =
encoder->helper_private;
funcs->mode_set(encoder,
&gma_crtc->saved_mode,
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 8cc8a5abbc7b..acd38344b302 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -849,7 +849,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs =
+ const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 2de216c2374f..1048f0c7c6ce 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -483,7 +483,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs =
+ const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 54f73f50571a..2310d879cdc2 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -347,7 +347,7 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index b21a09451d1d..6659da88fe5b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -108,7 +108,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 88aad95bde09..ce0645d0c1e5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -625,7 +625,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
else
gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS")) {
- struct drm_encoder_helper_funcs *hfuncs
+ const struct drm_encoder_helper_funcs *hfuncs
= encoder->helper_private;
hfuncs->dpms(encoder, value);
}
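
All of the gma500 hunks above make one mechanical change: helper vtables are now read through const-qualified pointers, matching the DRM core marking the helper structs const. A minimal sketch of the resulting pattern, with hypothetical callback names (example_*):

	#include <drm/drm_crtc_helper.h>

	static void example_crtc_dpms(struct drm_crtc *crtc, int mode);
	static int example_mode_set_base(struct drm_crtc *crtc, int x, int y,
					 struct drm_framebuffer *old_fb);

	/* the vtable lives in rodata; consumers must take a const pointer */
	static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
		.dpms = example_crtc_dpms,
		.mode_set_base = example_mode_set_base,
	};

	static void example_crtc_init(struct drm_crtc *crtc)
	{
		drm_crtc_helper_add(crtc, &example_crtc_helper_funcs);
	}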
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index fa140e04d5fa..b728523e194f 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -27,12 +27,13 @@ struct adv7511 {
struct regmap *regmap;
struct regmap *packet_memory_regmap;
enum drm_connector_status status;
- int dpms_mode;
+ bool powered;
unsigned int f_tmds;
unsigned int current_edid_segment;
uint8_t edid_buf[256];
+ bool edid_read;
wait_queue_head_t wq;
struct drm_encoder *encoder;
@@ -357,6 +358,48 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ adv7511->current_edid_segment = -1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+ ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+
+ /*
+ * Per spec it is allowed to pulse the HDP signal to indicate that the
+ * EDID information has changed. Some monitors do this when they wake up
+ * from standby or are enabled. When the HDP goes low the adv7511 is
+ * reset and the outputs are disabled which might cause the monitor to
+ * go to standby again. To avoid this we ignore the HDP pin for the
+ * first few seconds after enabling the output.
+ */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_NONE);
+
+ /*
+ * Most of the registers are reset during power down or when HPD is low.
+ */
+ regcache_sync(adv7511->regmap);
+
+ adv7511->powered = true;
+}
+
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ /* TODO: setup additional power down modes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+ regcache_mark_dirty(adv7511->regmap);
+
+ adv7511->powered = false;
+}
+
/* -----------------------------------------------------------------------------
* Interrupt and hotplug detection
*/
@@ -379,69 +422,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
-{
- struct adv7511 *adv7511 = devid;
-
- if (adv7511_hpd(adv7511))
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
-
- wake_up_all(&adv7511->wq);
-
- return IRQ_HANDLED;
-}
-
-static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
- unsigned int irq)
+static int adv7511_irq_process(struct adv7511 *adv7511)
{
unsigned int irq0, irq1;
- unsigned int pending;
int ret;
ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
if (ret < 0)
- return 0;
+ return ret;
+
ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
if (ret < 0)
- return 0;
+ return ret;
- pending = (irq1 << 8) | irq0;
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- return pending & irq;
+ if (irq0 & ADV7511_INT0_HDP)
+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+ adv7511->edid_read = true;
+
+ if (adv7511->i2c_main->irq)
+ wake_up_all(&adv7511->wq);
+ }
+
+ return 0;
}
-static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
- int timeout)
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+ int ret;
+
+ ret = adv7511_irq_process(adv7511);
+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
{
- unsigned int pending;
int ret;
if (adv7511->i2c_main->irq) {
ret = wait_event_interruptible_timeout(adv7511->wq,
- adv7511_is_interrupt_pending(adv7511, irq),
- msecs_to_jiffies(timeout));
- if (ret <= 0)
- return 0;
- pending = adv7511_is_interrupt_pending(adv7511, irq);
+ adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
- if (timeout < 25)
- timeout = 25;
- do {
- pending = adv7511_is_interrupt_pending(adv7511, irq);
- if (pending)
+ for (; timeout > 0; timeout -= 25) {
+ ret = adv7511_irq_process(adv7511);
+ if (ret < 0)
break;
+
+ if (adv7511->edid_read)
+ break;
+
msleep(25);
- timeout -= 25;
- } while (timeout >= 25);
+ }
}
- return pending;
+ return adv7511->edid_read ? 0 : -EIO;
}
-/* -----------------------------------------------------------------------------
- * EDID retrieval
- */
-
static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
size_t len)
{
@@ -463,19 +508,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
return ret;
if (status != 2) {
+ adv7511->edid_read = false;
regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
block);
- ret = adv7511_wait_for_interrupt(adv7511,
- ADV7511_INT0_EDID_READY |
- ADV7511_INT1_DDC_ERROR, 200);
-
- if (!(ret & ADV7511_INT0_EDID_READY))
- return -EIO;
+ ret = adv7511_wait_for_edid(adv7511, 200);
+ if (ret < 0)
+ return ret;
}
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
-
/* Break this apart, hopefully more I2C controllers will
* support 64 byte transfers than 256 byte transfers
*/
@@ -526,9 +566,11 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
unsigned int count;
/* Reading the EDID only works if the device is powered */
- if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+ if (!adv7511->powered) {
regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+ ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
adv7511->current_edid_segment = -1;
@@ -536,7 +578,7 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
- if (adv7511->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!adv7511->powered)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
@@ -558,41 +600,10 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- adv7511->current_edid_segment = -1;
-
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- /*
- * Per spec it is allowed to pulse the HDP signal to indicate
- * that the EDID information has changed. Some monitors do this
- * when they wakeup from standby or are enabled. When the HDP
- * goes low the adv7511 is reset and the outputs are disabled
- * which might cause the monitor to go to standby again. To
- * avoid this we ignore the HDP pin for the first few seconds
- * after enabeling the output.
- */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HDP_SRC_MASK,
- ADV7511_REG_POWER2_HDP_SRC_NONE);
- /* Most of the registers are reset during power down or
- * when HPD is low
- */
- regcache_sync(adv7511->regmap);
- break;
- default:
- /* TODO: setup additional power down modes */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN,
- ADV7511_POWER_POWER_DOWN);
- regcache_mark_dirty(adv7511->regmap);
- break;
- }
-
- adv7511->dpms_mode = mode;
+ if (mode == DRM_MODE_DPMS_ON)
+ adv7511_power_on(adv7511);
+ else
+ adv7511_power_off(adv7511);
}
static enum drm_connector_status
@@ -620,10 +631,9 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
* there is a pending HPD interrupt and the cable is connected there was
* at least one transition from disconnected to connected and the chip
* has to be reinitialized. */
- if (status == connector_status_connected && hpd &&
- adv7511->dpms_mode == DRM_MODE_DPMS_ON) {
+ if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
- adv7511_encoder_dpms(encoder, adv7511->dpms_mode);
+ adv7511_power_on(adv7511);
adv7511_get_modes(encoder, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
@@ -858,7 +868,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511)
return -ENOMEM;
- adv7511->dpms_mode = DRM_MODE_DPMS_OFF;
+ adv7511->powered = false;
adv7511->status = connector_status_disconnected;
ret = adv7511_parse_dt(dev->of_node, &link_config);
@@ -918,10 +928,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN);
-
- adv7511->current_edid_segment = -1;
+ adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
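
The dpms-to-powered refactor above leans on the regmap cache: the part loses register state across power-down, so adv7511_power_off() marks the cache dirty and adv7511_power_on() replays it via regcache_sync(). The idiom in isolation, a sketch independent of this driver:

	#include <linux/regmap.h>

	static int example_power_cycle(struct regmap *map)
	{
		/* going down: the hardware will forget its register contents */
		regcache_mark_dirty(map);

		/* ... device powered off and powered back on here ... */

		/* replay every cached register value into the hardware */
		return regcache_sync(map);
	}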
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a9041d1a8ff0..5febffdb027d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/i2c/tda998x.h>
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
@@ -387,7 +388,7 @@ set_page(struct tda998x_priv *priv, uint16_t reg)
};
int ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0) {
- dev_err(&client->dev, "setpage %04x err %d\n",
+ dev_err(&client->dev, "%s %04x err %d\n", __func__,
reg, ret);
return ret;
}
@@ -1035,8 +1036,9 @@ tda998x_encoder_detect(struct tda998x_priv *priv)
connector_status_disconnected;
}
-static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
+static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
{
+ struct tda998x_priv *priv = data;
uint8_t offset, segptr;
int ret, i;
@@ -1080,8 +1082,8 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
return -ETIMEDOUT;
}
- ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH);
- if (ret != EDID_LENGTH) {
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
+ if (ret != length) {
dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
blk, ret);
return ret;
@@ -1090,82 +1092,31 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
return 0;
}
-static uint8_t *do_get_edid(struct tda998x_priv *priv)
+static int
+tda998x_encoder_get_modes(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- int j, valid_extensions = 0;
- uint8_t *block, *new;
- bool print_bad_edid = drm_debug & DRM_UT_KMS;
-
- if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
- return NULL;
+ struct edid *edid;
+ int n;
if (priv->rev == TDA19988)
reg_clear(priv, REG_TX4, TX4_PD_RAM);
- /* base block fetch */
- if (read_edid_block(priv, block, 0))
- goto fail;
-
- if (!drm_edid_block_valid(block, 0, print_bad_edid))
- goto fail;
-
- /* if there's no extensions, we're done */
- if (block[0x7e] == 0)
- goto done;
-
- new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
- if (!new)
- goto fail;
- block = new;
-
- for (j = 1; j <= block[0x7e]; j++) {
- uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
- if (read_edid_block(priv, ext_block, j))
- goto fail;
-
- if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
- goto fail;
-
- valid_extensions++;
- }
-
- if (valid_extensions != block[0x7e]) {
- block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
- block[0x7e] = valid_extensions;
- new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
- if (!new)
- goto fail;
- block = new;
- }
+ edid = drm_do_get_edid(connector, read_edid_block, priv);
-done:
if (priv->rev == TDA19988)
reg_set(priv, REG_TX4, TX4_PD_RAM);
- return block;
-
-fail:
- if (priv->rev == TDA19988)
- reg_set(priv, REG_TX4, TX4_PD_RAM);
- dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
- kfree(block);
- return NULL;
-}
-
-static int
-tda998x_encoder_get_modes(struct tda998x_priv *priv,
- struct drm_connector *connector)
-{
- struct edid *edid = (struct edid *)do_get_edid(priv);
- int n = 0;
-
- if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
- priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
- kfree(edid);
+ if (!edid) {
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
+ return 0;
}
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ kfree(edid);
+
return n;
}
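
With this conversion tda998x delegates base/extension block iteration and validity checking to drm_do_get_edid(), supplying only a raw block reader — the same shape the adv7511 driver uses above. The callback contract, sketched with a hypothetical transport helper (example_ddc_read):

	#include <drm/drm_edid.h>

	static int example_ddc_read(void *priv, unsigned int block, u8 *buf,
				    size_t len);	/* assumed DDC transport */

	static int example_get_edid_block(void *data, u8 *buf, unsigned int block,
					  size_t len)
	{
		/* fetch 'len' bytes of EDID block 'block' into 'buf' */
		return example_ddc_read(data, block, buf, len);
	}

	/* usage: edid = drm_do_get_edid(connector, example_get_edid_block, priv); */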
@@ -1547,6 +1498,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
struct i2c_client *client = to_i2c_client(dev);
struct drm_device *drm = data;
struct tda998x_priv2 *priv;
+ uint32_t crtcs = 0;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -1555,9 +1507,18 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
dev_set_drvdata(dev, priv);
+ if (dev->of_node)
+ crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /* If no CRTCs were found, fall back to our old behaviour */
+ if (crtcs == 0) {
+ dev_warn(dev, "Falling back to first CRTC\n");
+ crtcs = 1 << 0;
+ }
+
priv->base.encoder = &priv->encoder;
priv->connector.interlace_allowed = 1;
- priv->encoder.possible_crtcs = 1 << 0;
+ priv->encoder.possible_crtcs = crtcs;
ret = tda998x_create(client, &priv->base);
if (ret)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f01922591679..a69002e2257d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem.o \
+ i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
@@ -83,9 +84,11 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o
+# virtual gpu code
+i915-y += i915_vgpu.o
+
# legacy horrors
-i915-y += i915_dma.o \
- i915_ums.o
+i915-y += i915_dma.o
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 806e812340d0..61ae8ff4eaed 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -818,23 +818,28 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
return false;
}
-static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+ unsigned start, unsigned len)
{
int i;
void *addr = NULL;
struct sg_page_iter sg_iter;
+ int first_page = start >> PAGE_SHIFT;
+ int last_page = (len + start + 4095) >> PAGE_SHIFT;
+ int npages = last_page - first_page;
struct page **pages;
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ pages = drm_malloc_ab(npages, sizeof(*pages));
if (pages == NULL) {
DRM_DEBUG_DRIVER("Failed to get space for pages\n");
goto finish;
}
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- pages[i] = sg_page_iter_page(&sg_iter);
- i++;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
+ pages[i++] = sg_page_iter_page(&sg_iter);
+ if (i == npages)
+ break;
}
addr = vmap(pages, i, 0, PAGE_KERNEL);
@@ -855,61 +860,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
u32 batch_start_offset,
u32 batch_len)
{
- int ret = 0;
int needs_clflush = 0;
- u32 *src_base, *dest_base = NULL;
- u32 *src_addr, *dest_addr;
- u32 offset = batch_start_offset / sizeof(*dest_addr);
- u32 end = batch_start_offset + batch_len;
+ void *src_base, *src;
+ void *dst = NULL;
+ int ret;
- if (end > dest_obj->base.size || end > src_obj->base.size)
+ if (batch_len > dest_obj->base.size ||
+ batch_len + batch_start_offset > src_obj->base.size)
return ERR_PTR(-E2BIG);
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
return ERR_PTR(ret);
}
- src_base = vmap_batch(src_obj);
+ src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
if (!src_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
ret = -ENOMEM;
goto unpin_src;
}
- src_addr = src_base + offset;
-
- if (needs_clflush)
- drm_clflush_virt_range((char *)src_addr, batch_len);
+ ret = i915_gem_object_get_pages(dest_obj);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
+ goto unmap_src;
+ }
+ i915_gem_object_pin_pages(dest_obj);
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+ DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
goto unmap_src;
}
- dest_base = vmap_batch(dest_obj);
- if (!dest_base) {
+ dst = vmap_batch(dest_obj, 0, batch_len);
+ if (!dst) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+ i915_gem_object_unpin_pages(dest_obj);
ret = -ENOMEM;
goto unmap_src;
}
- dest_addr = dest_base + offset;
-
- if (batch_start_offset != 0)
- memset((u8 *)dest_base, 0, batch_start_offset);
+ src = src_base + offset_in_page(batch_start_offset);
+ if (needs_clflush)
+ drm_clflush_virt_range(src, batch_len);
- memcpy(dest_addr, src_addr, batch_len);
- memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+ memcpy(dst, src, batch_len);
unmap_src:
vunmap(src_base);
unpin_src:
i915_gem_object_unpin_pages(src_obj);
- return ret ? ERR_PTR(ret) : dest_base;
+ return ret ? ERR_PTR(ret) : dst;
}
/**
@@ -1046,34 +1051,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
u32 batch_len,
bool is_master)
{
- int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
-
- ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
- return -1;
- }
+ int ret = 0;
batch_base = copy_batch(shadow_batch_obj, batch_obj,
batch_start_offset, batch_len);
if (IS_ERR(batch_base)) {
DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
- i915_gem_object_ggtt_unpin(shadow_batch_obj);
return PTR_ERR(batch_base);
}
- cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
- batch_end = cmd + (batch_len / sizeof(*batch_end));
+ batch_end = batch_base + (batch_len / sizeof(*batch_end));
+ cmd = batch_base;
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
u32 length;
@@ -1132,7 +1129,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
- i915_gem_object_ggtt_unpin(shadow_batch_obj);
+ i915_gem_object_unpin_pages(shadow_batch_obj);
return ret;
}
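
The key change to vmap_batch() is mapping only the pages actually covered by [start, start + len) instead of the whole object. The range arithmetic on its own (the driver hard-codes 4095, i.e. PAGE_SIZE - 1 on 4 KiB-page systems):

	#include <linux/mm.h>

	static unsigned int batch_page_count(unsigned int start, unsigned int len)
	{
		unsigned int first_page = start >> PAGE_SHIFT;
		/* round the end of the range up to the next page boundary */
		unsigned int last_page = (start + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		return last_page - first_page;	/* pages handed to vmap() */
	}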
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e8b18e542da4..007c7d7d8295 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -139,10 +139,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (vma->pin_count > 0)
pin_count++;
- seq_printf(m, " (pinned x %d)", pin_count);
+ }
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_display)
seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank,
work->flip_ready_vblank,
- drm_vblank_count(dev, crtc->pipe));
+ drm_crtc_vblank_count(&crtc->base));
if (work->enable_stall_check)
seq_puts(m, "Stall check enabled, ");
else
@@ -1089,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
- IS_BROADWELL(dev)) {
+ IS_BROADWELL(dev) || IS_GEN9(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1108,11 +1109,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- reqf >>= 24;
- else
- reqf >>= 25;
+ if (IS_GEN9(dev))
+ reqf >>= 23;
+ else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
reqf = intel_gpu_freq(dev_priv, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
@@ -1126,7 +1131,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1152,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & 0xff00) >> 8);
+ (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1177,19 +1184,25 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = rp_state_cap & 0xff;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+ seq_printf(m, "Idle freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1204,6 +1217,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "min GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ seq_printf(m, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
@@ -1778,11 +1794,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
+ fb->base.modifier[0],
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1793,11 +1810,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
+ fb->base.modifier[0],
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1828,18 +1846,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->ips.pwrctx) {
- seq_puts(m, "power context ");
- describe_obj(m, dev_priv->ips.pwrctx);
- seq_putc(m, '\n');
- }
-
- if (dev_priv->ips.renderctx) {
- seq_puts(m, "render context ");
- describe_obj(m, dev_priv->ips.renderctx);
- seq_putc(m, '\n');
- }
-
list_for_each_entry(ctx, &dev_priv->context_list, link) {
if (!i915.enable_execlists &&
ctx->legacy_hw_ctx.rcs_state == NULL)
@@ -2183,7 +2189,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
ppgtt->debug_dump(ppgtt, m);
}
@@ -2243,6 +2249,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enum pipe pipe;
bool enabled = false;
+ if (!HAS_PSR(dev)) {
+ seq_puts(m, "PSR not supported\n");
+ return 0;
+ }
+
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
@@ -2255,17 +2266,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_PSR(dev)) {
- if (HAS_DDI(dev))
- enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- else {
- for_each_pipe(dev_priv, pipe) {
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
- }
+ if (HAS_DDI(dev))
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ else {
+ for_each_pipe(dev_priv, pipe) {
+ stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ enabled = true;
}
}
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
@@ -2282,7 +2291,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
yesno((bool)dev_priv->psr.link_standby));
/* CHV PSR has no kind of performance counter */
- if (HAS_PSR(dev) && HAS_DDI(dev)) {
+ if (HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
@@ -2305,8 +2314,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
u8 crc[6];
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.dpms != DRM_MODE_DPMS_ON)
continue;
@@ -2674,7 +2682,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
active = cursor_position(dev, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_width, crtc->cursor_height,
+ x, y, crtc->base.cursor->state->crtc_w,
+ crtc->base.cursor->state->crtc_h,
crtc->cursor_addr, yesno(active));
}
@@ -2850,7 +2859,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -2867,6 +2876,115 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
return 0;
}
+static void drrs_status_per_crtc(struct seq_file *m,
+ struct drm_device *dev, struct intel_crtc *intel_crtc)
+{
+ struct intel_encoder *intel_encoder;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_drrs *drrs = &dev_priv->drrs;
+ int vrefresh = 0;
+
+ for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
+ /* Encoder connected on this CRTC */
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ seq_puts(m, "eDP:\n");
+ break;
+ case INTEL_OUTPUT_DSI:
+ seq_puts(m, "DSI:\n");
+ break;
+ case INTEL_OUTPUT_HDMI:
+ seq_puts(m, "HDMI:\n");
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ seq_puts(m, "DP:\n");
+ break;
+ default:
+ seq_printf(m, "Other encoder (id=%d).\n",
+ intel_encoder->type);
+ return;
+ }
+ }
+
+ if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Static");
+ else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Seamless");
+ else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
+ seq_puts(m, "\tVBT: DRRS_type: None");
+ else
+ seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
+
+ seq_puts(m, "\n\n");
+
+ if (intel_crtc->config->has_drrs) {
+ struct intel_panel *panel;
+
+ mutex_lock(&drrs->mutex);
+ /* DRRS Supported */
+ seq_puts(m, "\tDRRS Supported: Yes\n");
+
+ /* disable_drrs() will make drrs->dp NULL */
+ if (!drrs->dp) {
+ seq_puts(m, "Idleness DRRS: Disabled");
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+
+ panel = &drrs->dp->attached_connector->panel;
+ seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+ drrs->busy_frontbuffer_bits);
+
+ seq_puts(m, "\n\t\t");
+ if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+ seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+ vrefresh = panel->fixed_mode->vrefresh;
+ } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+ seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+ vrefresh = panel->downclock_mode->vrefresh;
+ } else {
+ seq_printf(m, "DRRS_State: Unknown(%d)\n",
+ drrs->refresh_rate_type);
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+ seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+ seq_puts(m, "\n\t\t");
+ mutex_unlock(&drrs->mutex);
+ } else {
+ /* DRRS not supported. Print the VBT parameter */
+ seq_puts(m, "\tDRRS Supported : No");
+ }
+ seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_crtc *intel_crtc;
+ int active_crtc_cnt = 0;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ drm_modeset_lock(&intel_crtc->base.mutex, NULL);
+
+ if (intel_crtc->active) {
+ active_crtc_cnt++;
+ seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+ drrs_status_per_crtc(m, dev, intel_crtc);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+ }
+
+ if (!active_crtc_cnt)
+ seq_puts(m, "No active crtc found\n");
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -4189,7 +4307,7 @@ i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rp_state_cap, hw_max, hw_min;
+ u32 hw_max, hw_min;
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4206,18 +4324,10 @@ i915_max_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- if (IS_VALLEYVIEW(dev)) {
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
- } else {
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.max_freq;
- hw_min = (rp_state_cap >> 16) & 0xff;
- }
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4226,10 +4336,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4267,7 +4374,7 @@ i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rp_state_cap, hw_max, hw_min;
+ u32 hw_max, hw_min;
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4284,18 +4391,10 @@ i915_min_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- if (IS_VALLEYVIEW(dev)) {
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
- } else {
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.max_freq;
- hw_min = (rp_state_cap >> 16) & 0xff;
- }
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4304,10 +4403,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
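
Both debugfs setters now funnel through one helper instead of open-coding the Valleyview split. intel_set_rps() is introduced elsewhere in this series; presumably it reduces to something like this sketch:

	void intel_set_rps(struct drm_device *dev, u8 val)
	{
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	}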
@@ -4374,6 +4470,112 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+
+ if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
+ return -ENODEV;
+
+ seq_puts(m, "SSEU Device Info\n");
+ seq_printf(m, " Available Slice Total: %u\n",
+ INTEL_INFO(dev)->slice_total);
+ seq_printf(m, " Available Subslice Total: %u\n",
+ INTEL_INFO(dev)->subslice_total);
+ seq_printf(m, " Available Subslice Per Slice: %u\n",
+ INTEL_INFO(dev)->subslice_per_slice);
+ seq_printf(m, " Available EU Total: %u\n",
+ INTEL_INFO(dev)->eu_total);
+ seq_printf(m, " Available EU Per Subslice: %u\n",
+ INTEL_INFO(dev)->eu_per_subslice);
+ seq_printf(m, " Has Slice Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_slice_pg));
+ seq_printf(m, " Has Subslice Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_subslice_pg));
+ seq_printf(m, " Has EU Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_eu_pg));
+
+ seq_puts(m, "SSEU Device Status\n");
+ if (IS_CHERRYVIEW(dev)) {
+ const int ss_max = 2;
+ int ss;
+ u32 sig1[ss_max], sig2[ss_max];
+
+ sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+ sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+ sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+ sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ s_tot = 1;
+ ss_per++;
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ eu_tot += eu_cnt;
+ eu_per = max(eu_per, eu_cnt);
+ }
+ ss_tot = ss_per;
+ } else if (IS_SKYLAKE(dev)) {
+ const int s_max = 3, ss_max = 4;
+ int s, ss;
+ u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+ s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
+ s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
+ s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
+ eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
+ eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
+ eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
+ eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
+ eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
+ eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < s_max; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ s_tot++;
+ ss_per = INTEL_INFO(dev)->subslice_per_slice;
+ ss_tot += ss_per;
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+ eu_mask[ss%2]);
+ eu_tot += eu_cnt;
+ eu_per = max(eu_per, eu_cnt);
+ }
+ }
+ }
+ seq_printf(m, " Enabled Slice Total: %u\n", s_tot);
+ seq_printf(m, " Enabled Subslice Total: %u\n", ss_tot);
+ seq_printf(m, " Enabled Subslice Per Slice: %u\n", ss_per);
+ seq_printf(m, " Enabled EU Total: %u\n", eu_tot);
+ seq_printf(m, " Enabled EU Per Subslice: %u\n", eu_per);
+
+ return 0;
+}
+
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
@@ -4487,6 +4689,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_sseu_status", i915_sseu_status, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
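
Both the new i915_sseu_status node above and the runtime device-info probing below count enabled units by population count over fuse masks: the hardware reports which slices/subslices/EUs are fused off, so enabled = maximum - hweight(disable bits). The idiom in isolation:

	#include <linux/bitops.h>

	static unsigned int enabled_units(u32 disable_mask, unsigned int unit_max)
	{
		/* hweight32() counts set bits, i.e. the fused-off units */
		return unit_max - hweight32(disable_mask);
	}

	/* e.g. 16 EUs with two fused off: enabled_units(0x0c, 16) == 14 */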
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a46787129e7..68e0c85a17cf 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,6 +36,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
@@ -67,6 +68,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
+ case I915_PARAM_REVISION:
+ value = dev->pdev->revision;
+ break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
@@ -149,6 +153,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_MMAP_VERSION:
value = 1;
break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = INTEL_INFO(dev)->subslice_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = INTEL_INFO(dev)->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
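
The new getparams exist so userspace can query GPU topology. A hedged userspace sketch using the raw ioctl (real code would go through libdrm's drmIoctl wrapper; error handling elided):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int example_getparam(int fd, int param, int *value)
	{
		struct drm_i915_getparam gp = {
			.param = param,
			.value = value,	/* the kernel writes the result here */
		};

		return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	}

	/* usage: example_getparam(fd, I915_PARAM_EU_TOTAL, &eu_total); */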
@@ -605,16 +619,128 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
}
}
+ /* Initialize slice/subslice/EU info */
if (IS_CHERRYVIEW(dev)) {
- u32 fuse, mask_eu;
+ u32 fuse, eu_dis;
fuse = I915_READ(CHV_FUSE_GT);
- mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK |
- CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- info->eu_total = 16 - hweight32(mask_eu);
+
+ info->slice_total = 1;
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ info->subslice_total = info->subslice_per_slice;
+ /*
+ * CHV is expected to always have a uniform distribution of EUs
+ * across subslices.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ info->eu_total / info->subslice_total :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = 0;
+ info->has_subslice_pg = (info->subslice_total > 1);
+ info->has_eu_pg = (info->eu_per_subslice > 2);
+ } else if (IS_SKYLAKE(dev)) {
+ const int s_max = 3, ss_max = 4, eu_max = 8;
+ int s, ss;
+ u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+ GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT;
+
+ eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
+ eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
+ eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
+
+ info->slice_total = hweight32(s_enable);
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total *
+ info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & (0x1 << s)))
+ /* skip disabled slice */
+ continue;
+
+ for (ss = 0; ss < ss_max; ss++) {
+ u32 n_disabled;
+
+ if (ss_disable & (0x1 << ss))
+ /* skip disabled subslice */
+ continue;
+
+ n_disabled = hweight8(eu_disable[s] >>
+ (ss * eu_max));
+
+ /*
+ * Record which subslice(s) have 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_max - n_disabled == 7)
+ info->subslice_7eu[s] |= 1 << ss;
+
+ info->eu_total += eu_max - n_disabled;
+ }
+ }
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total,
+ info->subslice_total) : 0;
+ /*
+ * SKL supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
+ info->has_subslice_pg = 0;
+ info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
}
+ DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+ DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+ DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+ DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+ DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+ DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+ info->has_slice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+ info->has_subslice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+ info->has_eu_pg ? "y" : "n");
}
/**
@@ -637,17 +763,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
info = (struct intel_device_info *) flags;
- /* Refuse to load on gen6+ without kms enabled. */
- if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
- DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
- DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
- return -ENODEV;
- }
-
- /* UMS needs agp support. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
- return -EINVAL;
-
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@@ -717,20 +832,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_regs;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over. */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_gtt;
- }
+ /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over. */
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_gtt;
+ }
- ret = i915_kick_out_vgacon(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_gtt;
- }
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_gtt;
}
pci_set_master(dev->pdev);
@@ -834,14 +947,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_power_domains_init(dev_priv);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev);
- if (ret < 0) {
- DRM_ERROR("failed to init modeset\n");
- goto out_power_well;
- }
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto out_power_well;
}
+ /*
+ * When running inside a VM, notify the host of a valid surface
+ * after modesetting.
+ */
+ if (intel_vgpu_active(dev))
+ I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
i915_setup_sysfs(dev);
if (INTEL_INFO(dev)->num_pipes) {
@@ -921,28 +1039,25 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister();
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_fbdev_fini(dev);
+ intel_fbdev_fini(dev);
drm_vblank_cleanup(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
+ intel_modeset_cleanup(dev);
- vga_switcheroo_unregister_client(dev->pdev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
}
+ vga_switcheroo_unregister_client(dev->pdev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
@@ -952,17 +1067,15 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_fini(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Flush any outstanding unpin_work. */
- flush_workqueue(dev_priv->wq);
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
- i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_stolen(dev);
- }
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_stolen(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1023,8 +1136,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_modeset_preclose(dev, file);
+ intel_modeset_preclose(dev, file);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1087,7 +1199,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c66b568bb81..c24c3f1ff8a3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,7 +346,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
};
static const struct intel_device_info intel_cherryview_info = {
- .is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -369,6 +368,19 @@ static const struct intel_device_info intel_skylake_info = {
IVB_CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_skylake_gt3_info = {
+ .is_preliminary = 1,
+ .is_skylake = 1,
+ .gen = 9, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -406,7 +418,9 @@ static const struct intel_device_info intel_skylake_info = {
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info), \
- INTEL_SKL_IDS(&intel_skylake_info)
+ INTEL_SKL_GT1_IDS(&intel_skylake_info), \
+ INTEL_SKL_GT2_IDS(&intel_skylake_info), \
+ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -553,6 +567,7 @@ static int i915_drm_suspend(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
pci_power_t opregion_target_state;
+ int error;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -567,37 +582,32 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(dev->pdev);
- /* If KMS is active, we do the leavevt stuff here */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int error;
-
- error = i915_gem_suspend(dev);
- if (error) {
- dev_err(&dev->pdev->dev,
- "GEM idle failed, resume might fail\n");
- return error;
- }
+ error = i915_gem_suspend(dev);
+ if (error) {
+ dev_err(&dev->pdev->dev,
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
- intel_suspend_gt_powersave(dev);
+ intel_suspend_gt_powersave(dev);
- /*
- * Disable CRTCs directly since we want to preserve sw state
- * for _thaw. Also, power gate the CRTC power wells.
- */
- drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc)
- intel_crtc_control(crtc, false);
- drm_modeset_unlock_all(dev);
+ /*
+ * Disable CRTCs directly since we want to preserve sw state
+ * for _thaw. Also, power gate the CRTC power wells.
+ */
+ drm_modeset_lock_all(dev);
+ for_each_crtc(dev, crtc)
+ intel_crtc_control(crtc, false);
+ drm_modeset_unlock_all(dev);
- intel_dp_mst_suspend(dev);
+ intel_dp_mst_suspend(dev);
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_hpd_cancel_work(dev_priv);
- intel_suspend_encoders(dev_priv);
+ intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
- }
+ intel_suspend_hw(dev);
i915_gem_suspend_gtt_mappings(dev);
@@ -679,53 +689,48 @@ static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
i915_restore_state(dev);
intel_opregion_setup(dev);
- /* KMS EnterVT equivalent */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_init_pch_refclk(dev);
- drm_mode_config_reset(dev);
+ intel_init_pch_refclk(dev);
+ drm_mode_config_reset(dev);
- mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
- }
- mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (i915_gem_init_hw(dev)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ }
+ mutex_unlock(&dev->struct_mutex);
- /* We need working interrupts for modeset enabling ... */
- intel_runtime_pm_enable_interrupts(dev_priv);
+ /* We need working interrupts for modeset enabling ... */
+ intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
- drm_modeset_unlock_all(dev);
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
- intel_dp_mst_resume(dev);
+ intel_dp_mst_resume(dev);
- /*
- * ... but also need to make sure that hotplug processing
- * doesn't cause havoc. Like in the driver load code we don't
- * bother with the tiny race here where we might loose hotplug
- * notifications.
- * */
- intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
- }
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+ * bother with the tiny race here where we might lose hotplug
+ * notifications.
+ */
+ intel_hpd_init(dev_priv);
+ /* Config may have changed between suspend and resume */
+ drm_helper_hpd_irq_event(dev);
intel_opregion_init(dev);
@@ -861,38 +866,29 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- dev_priv->gpu_error.reload_in_reset = true;
- ret = i915_gem_init_hw(dev);
+ /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+ dev_priv->gpu_error.reload_in_reset = true;
- dev_priv->gpu_error.reload_in_reset = false;
+ ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("Failed hw init on reset %d\n", ret);
- return ret;
- }
+ dev_priv->gpu_error.reload_in_reset = false;
- /*
- * FIXME: This races pretty badly against concurrent holders of
- * ring interrupts. This is possible since we've started to drop
- * dev->struct_mutex in select places when waiting for the gpu.
- */
-
- /*
- * rps/rc6 re-init is necessary to restore state lost after the
- * reset and the re-install of gt irqs. Skip for ironlake per
- * previous concerns that it doesn't respond well to some forms
- * of re-init after reset.
- */
- if (INTEL_INFO(dev)->gen > 5)
- intel_enable_gt_powersave(dev);
- } else {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("Failed hw init on reset %d\n", ret);
+ return ret;
}
+ /*
+ * rps/rc6 re-init is necessary to restore state lost after the
+ * reset and the re-install of gt irqs. Skip for ironlake per
+ * previous concerns that it doesn't respond well to some forms
+ * of re-init after reset.
+ */
+ if (INTEL_INFO(dev)->gen > 5)
+ intel_enable_gt_powersave(dev);
+
return 0;
}
@@ -1640,11 +1636,9 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET)) {
driver.get_vblank_timestamp = NULL;
-#ifndef CONFIG_DRM_I915_UMS
/* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
return 0;
-#endif
}
/*
@@ -1660,10 +1654,8 @@ static int __init i915_init(void)
static void __exit i915_exit(void)
{
-#ifndef CONFIG_DRM_I915_UMS
if (!(driver.driver_features & DRIVER_MODESET))
return; /* Never loaded a driver. */
-#endif
drm_pci_exit(&driver, &i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b4faa2df9d3d..8ae6f7f06b3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
#define _I915_DRV_H_
#include <uapi/drm/i915_drm.h>
+#include <uapi/drm/drm_fourcc.h>
#include "i915_reg.h"
#include "intel_bios.h"
@@ -55,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150130"
+#define DRIVER_DATE "20150327"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -69,6 +70,9 @@
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
@@ -222,9 +226,14 @@ enum hpd_pin {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_plane(pipe, p) \
- for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
-#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+#define for_each_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
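/*
 * Illustrative sketch (not part of this diff): with the reworked macros the
 * iteration bounds come from the dev_priv argument instead of a captured
 * 'dev', e.g.
 *
 *	for_each_pipe(dev_priv, pipe) {
 *		for_each_plane(dev_priv, pipe, plane)
 *			init_plane_wm(dev_priv, pipe, plane);
 *		for_each_sprite(dev_priv, pipe, sprite)
 *			init_sprite_wm(dev_priv, pipe, sprite);
 *	}
 *
 * init_plane_wm()/init_sprite_wm() are hypothetical helpers.
 */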
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -237,6 +246,12 @@ enum hpd_pin {
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_connector(dev, intel_connector) \
+ list_for_each_entry(intel_connector, \
+ &dev->mode_config.connector_list, \
+ base.head)
+
+
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -412,6 +427,8 @@ struct drm_i915_error_state {
u32 forcewake;
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
+ u32 fault_data0; /* gen8, gen9 */
+ u32 fault_data1; /* gen8, gen9 */
u32 done_reg;
u32 gac_eco;
u32 gam_ecochk;
@@ -529,7 +546,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
- struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
@@ -538,7 +555,7 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
- void (*modeset_global_resources)(struct drm_device *dev);
+ void (*modeset_global_resources)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -692,7 +709,18 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
- unsigned int eu_total;
+
+ /* Slice/subslice/EU info */
+ u8 slice_total;
+ u8 subslice_total;
+ u8 subslice_per_slice;
+ u8 eu_total;
+ u8 eu_per_subslice;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
};
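/*
 * Editorial sketch (assumption, not from this diff): eu_per_subslice reads
 * as an upper bound per subslice, so on parts with fused-off EUs one would
 * expect
 *
 *	const struct intel_device_info *info = INTEL_INFO(dev);
 *
 *	WARN_ON(info->eu_total >
 *		info->subslice_total * info->eu_per_subslice);
 *
 * with the subslice_7eu[] bitfields identifying which subslices have only
 * 7 EUs enabled.
 */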
#undef DEFINE_FLAG
@@ -771,11 +799,20 @@ struct intel_context {
struct list_head link;
};
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+};
+
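/*
 * Illustrative use (mirrors the call sites added to i915_gem.c below):
 * frontbuffer writers tag the invalidate with how the write happens so the
 * FBC/PSR code can choose a matching flush strategy, e.g.
 *
 *	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
 *	... CPU writes to the object ...
 *	intel_fb_obj_flush(obj, false);
 */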
struct i915_fbc {
- unsigned long size;
+ unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
- enum plane plane;
+ unsigned int possible_framebuffer_bits;
+ unsigned int busy_bits;
+ struct intel_crtc *crtc;
int y;
struct drm_mm_node compressed_fb;
@@ -787,14 +824,6 @@ struct i915_fbc {
* possible. */
bool enabled;
- /* On gen8 some rings cannont perform fbc clean operation so for now
- * we are doing this on SW with mmio.
- * This variable works in the opposite information direction
- * of ring->fbc_dirty telling software on frontbuffer tracking
- * to perform the cache clean on sw side.
- */
- bool need_sw_cache_clean;
-
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -888,150 +917,21 @@ struct intel_gmbus {
};
struct i915_suspend_saved_registers {
- u8 saveLBB;
- u32 saveDSPACNTR;
- u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 savePIPEACONF;
- u32 savePIPEBCONF;
- u32 savePIPEASRC;
- u32 savePIPEBSRC;
- u32 saveFPA0;
- u32 saveFPA1;
- u32 saveDPLL_A;
- u32 saveDPLL_A_MD;
- u32 saveHTOTAL_A;
- u32 saveHBLANK_A;
- u32 saveHSYNC_A;
- u32 saveVTOTAL_A;
- u32 saveVBLANK_A;
- u32 saveVSYNC_A;
- u32 saveBCLRPAT_A;
- u32 saveTRANSACONF;
- u32 saveTRANS_HTOTAL_A;
- u32 saveTRANS_HBLANK_A;
- u32 saveTRANS_HSYNC_A;
- u32 saveTRANS_VTOTAL_A;
- u32 saveTRANS_VBLANK_A;
- u32 saveTRANS_VSYNC_A;
- u32 savePIPEASTAT;
- u32 saveDSPASTRIDE;
- u32 saveDSPASIZE;
- u32 saveDSPAPOS;
- u32 saveDSPAADDR;
- u32 saveDSPASURF;
- u32 saveDSPATILEOFF;
- u32 savePFIT_PGM_RATIOS;
- u32 saveBLC_HIST_CTL;
- u32 saveBLC_PWM_CTL;
- u32 saveBLC_PWM_CTL2;
- u32 saveBLC_CPU_PWM_CTL;
- u32 saveBLC_CPU_PWM_CTL2;
- u32 saveFPB0;
- u32 saveFPB1;
- u32 saveDPLL_B;
- u32 saveDPLL_B_MD;
- u32 saveHTOTAL_B;
- u32 saveHBLANK_B;
- u32 saveHSYNC_B;
- u32 saveVTOTAL_B;
- u32 saveVBLANK_B;
- u32 saveVSYNC_B;
- u32 saveBCLRPAT_B;
- u32 saveTRANSBCONF;
- u32 saveTRANS_HTOTAL_B;
- u32 saveTRANS_HBLANK_B;
- u32 saveTRANS_HSYNC_B;
- u32 saveTRANS_VTOTAL_B;
- u32 saveTRANS_VBLANK_B;
- u32 saveTRANS_VSYNC_B;
- u32 savePIPEBSTAT;
- u32 saveDSPBSTRIDE;
- u32 saveDSPBSIZE;
- u32 saveDSPBPOS;
- u32 saveDSPBADDR;
- u32 saveDSPBSURF;
- u32 saveDSPBTILEOFF;
- u32 saveVGA0;
- u32 saveVGA1;
- u32 saveVGA_PD;
- u32 saveVGACNTRL;
- u32 saveADPA;
u32 saveLVDS;
u32 savePP_ON_DELAYS;
u32 savePP_OFF_DELAYS;
- u32 saveDVOA;
- u32 saveDVOB;
- u32 saveDVOC;
u32 savePP_ON;
u32 savePP_OFF;
u32 savePP_CONTROL;
u32 savePP_DIVISOR;
- u32 savePFIT_CONTROL;
- u32 save_palette_a[256];
- u32 save_palette_b[256];
u32 saveFBC_CONTROL;
- u32 saveIER;
- u32 saveIIR;
- u32 saveIMR;
- u32 saveDEIER;
- u32 saveDEIMR;
- u32 saveGTIER;
- u32 saveGTIMR;
- u32 saveFDI_RXA_IMR;
- u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF2[3];
- u8 saveMSR;
- u8 saveSR[8];
- u8 saveGR[25];
- u8 saveAR_INDEX;
- u8 saveAR[21];
- u8 saveDACMASK;
- u8 saveCR[37];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
- u32 saveCURACNTR;
- u32 saveCURAPOS;
- u32 saveCURABASE;
- u32 saveCURBCNTR;
- u32 saveCURBPOS;
- u32 saveCURBBASE;
- u32 saveCURSIZE;
- u32 saveDP_B;
- u32 saveDP_C;
- u32 saveDP_D;
- u32 savePIPEA_GMCH_DATA_M;
- u32 savePIPEB_GMCH_DATA_M;
- u32 savePIPEA_GMCH_DATA_N;
- u32 savePIPEB_GMCH_DATA_N;
- u32 savePIPEA_DP_LINK_M;
- u32 savePIPEB_DP_LINK_M;
- u32 savePIPEA_DP_LINK_N;
- u32 savePIPEB_DP_LINK_N;
- u32 saveFDI_RXA_CTL;
- u32 saveFDI_TXA_CTL;
- u32 saveFDI_RXB_CTL;
- u32 saveFDI_TXB_CTL;
- u32 savePFA_CTL_1;
- u32 savePFB_CTL_1;
- u32 savePFA_WIN_SZ;
- u32 savePFB_WIN_SZ;
- u32 savePFA_WIN_POS;
- u32 savePFB_WIN_POS;
- u32 savePCH_DREF_CONTROL;
- u32 saveDISP_ARB_CTL;
- u32 savePIPEA_DATA_M1;
- u32 savePIPEA_DATA_N1;
- u32 savePIPEA_LINK_M1;
- u32 savePIPEA_LINK_N1;
- u32 savePIPEB_DATA_M1;
- u32 savePIPEB_DATA_N1;
- u32 savePIPEB_LINK_M1;
- u32 savePIPEB_LINK_N1;
- u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -1128,13 +1028,12 @@ struct intel_gen6_power_mgmt {
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u32 cz_freq;
- u32 ei_interrupt_count;
-
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -1171,9 +1070,6 @@ struct intel_ilk_power_mgmt {
int c_m;
int r_t;
-
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
};
struct drm_i915_private;
@@ -1455,6 +1351,7 @@ struct intel_vbt_data {
bool edp_initialized;
bool edp_support;
int edp_bpp;
+ bool edp_low_vswing;
struct edp_power_seq edp_pps;
struct {
@@ -1515,6 +1412,25 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
+struct vlv_wm_values {
+ struct {
+ uint16_t primary;
+ uint16_t sprite[2];
+ uint8_t cursor;
+ } pipe[3];
+
+ struct {
+ uint16_t plane;
+ uint8_t cursor;
+ } sr;
+
+ struct {
+ uint8_t cursor;
+ uint8_t sprite[2];
+ uint8_t primary;
+ } ddl[3];
+};
+
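/*
 * Editorial note (hedged): pipe[] holds per-plane watermarks for each of
 * the three VLV/CHV pipes, sr the single-pipe self-refresh (maxfifo)
 * watermarks, and ddl[] the per-plane drain-latency values.
 */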
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
};
@@ -1641,6 +1557,10 @@ struct i915_workarounds {
u32 count;
};
+struct i915_virtual_gpu {
+ bool active;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1653,6 +1573,8 @@ struct drm_i915_private {
struct intel_uncore uncore;
+ struct i915_virtual_gpu vgpu;
+
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
@@ -1871,6 +1793,7 @@ struct drm_i915_private {
union {
struct ilk_wm_values hw;
struct skl_wm_values skl_hw;
+ struct vlv_wm_values vlv;
};
} wm;
@@ -2142,7 +2065,7 @@ struct drm_i915_gem_request {
u32 tail;
/**
- * Context related to this request
+ * Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
* context, we must increment the context's refcount, to guarantee that
* it persists while any request is linked to it. Requests themselves
@@ -2152,6 +2075,7 @@ struct drm_i915_gem_request {
* context.
*/
struct intel_context *ctx;
+ struct intel_ringbuffer *ringbuf;
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
@@ -2166,6 +2090,9 @@ struct drm_i915_gem_request {
/** file_priv list entry for this request */
struct list_head client_list;
+ /** process identifier submitting this request */
+ struct pid *pid;
+
uint32_t uniq;
/**
@@ -2352,6 +2279,7 @@ struct drm_i915_cmd_table {
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
@@ -2374,9 +2302,6 @@ struct drm_i915_cmd_table {
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a)
-#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
- INTEL_DEVID(dev) == 0x0106 || \
- INTEL_DEVID(dev) == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
@@ -2400,6 +2325,12 @@ struct drm_i915_cmd_table {
INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define SKL_REVID_A0 (0x0)
+#define SKL_REVID_B0 (0x1)
+#define SKL_REVID_C0 (0x2)
+#define SKL_REVID_D0 (0x3)
+#define SKL_REVID_E0 (0x4)
+
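/*
 * Typical (illustrative) use for stepping-limited workarounds, assuming an
 * IS_SKYLAKE() platform check as used elsewhere in the driver:
 *
 *	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
 *		apply_pre_production_workaround(dev);
 *
 * where apply_pre_production_workaround() is a hypothetical helper.
 */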
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2499,6 +2430,7 @@ struct drm_i915_cmd_table {
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
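/*
 * Sketch (assumption based on these constants): pre-gen9 RPS values are in
 * 50 MHz units, while gen9 divides that unit by GEN9_FREQ_SCALER, i.e.
 * roughly 16.67 MHz per step:
 *
 *	int freq_mhz = val * GT_FREQUENCY_MULTIPLIER;
 *
 *	if (IS_GEN9(dev))
 *		freq_mhz /= GEN9_FREQ_SCALER;
 */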
#include "i915_trace.h"
@@ -2507,14 +2439,11 @@ extern int i915_max_ioctl;
extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
extern int i915_resume_legacy(struct drm_device *dev);
-extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
-extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
- unsigned int powersave;
int semaphores;
unsigned int lvds_downclock;
int lvds_channel_mode;
@@ -2534,11 +2463,12 @@ struct i915_params {
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
+ bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
int use_mmio_flip;
- bool mmio_debug;
+ int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
};
@@ -2591,6 +2521,10 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+static inline bool intel_vgpu_active(struct drm_device *dev)
+{
+ return to_i915(dev)->vgpu.active;
+}
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2669,12 +2603,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target,
- unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2691,20 +2619,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
-int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view);
-static inline
-int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
-{
- return i915_gem_object_pin_view(obj, vm, alignment, flags,
- &i915_ggtt_view_normal);
-}
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags);
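/*
 * Illustrative call (hedged): callers now name the GGTT view explicitly
 * when pinning through the global GTT, e.g. for a scanout buffer using the
 * normal view:
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
 *				       alignment, PIN_MAPPABLE);
 *
 * PIN_GLOBAL is OR'ed in by the helper itself.
 */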
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
@@ -2844,8 +2768,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2868,60 +2794,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
- return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+ return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}
+
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
-}
+ struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-static inline
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
-}
-
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
- &i915_ggtt_view_normal);
-}
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->pin_count > 0)
- return true;
- return false;
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
@@ -2944,13 +2856,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
+ return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
static inline unsigned long
@@ -2974,7 +2880,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}
-void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
+static inline void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+}
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3046,6 +2958,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target,
+ unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
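/*
 * Illustrative call (hedged): a caller that only wants to reap purgeable,
 * already-unbound backing storage could request, for some page target:
 *
 *	i915_gem_shrink(dev_priv, target,
 *			I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE);
 */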
+
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
@@ -3121,10 +3044,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* i915_ums.c */
-void i915_save_display_reg(struct drm_device *dev);
-void i915_restore_display_reg(struct drm_device *dev);
-
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
@@ -3196,8 +3115,7 @@ extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev);
@@ -3210,8 +3128,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void intel_notify_mmio_flip(struct intel_engine_cs *ring);
-
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 27ea6bdebce7..d07c0b1fb498 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1,5 +1,5 @@
/*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,9 +29,9 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -52,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
- struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
- struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
- unsigned long event,
- void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
@@ -350,7 +341,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
- int ret;
+ int ret = 0;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
@@ -359,6 +350,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -369,13 +361,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
- if (unwritten)
- return -EFAULT;
+ if (unwritten) {
+ ret = -EFAULT;
+ goto out;
+ }
}
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
- return 0;
+
+out:
+ intel_fb_obj_flush(obj, false);
+ return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
@@ -809,6 +806,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+
while (remain > 0) {
/* Operation in this page
*
@@ -829,7 +828,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
- goto out_unpin;
+ goto out_flush;
}
remain -= page_length;
@@ -837,6 +836,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset += page_length;
}
+out_flush:
+ intel_fb_obj_flush(obj, false);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
@@ -951,6 +952,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+
i915_gem_object_pin_pages(obj);
offset = args->offset;
@@ -1029,6 +1032,7 @@ out:
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
+ intel_fb_obj_flush(obj, false);
return ret;
}
@@ -1922,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2033,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target, unsigned flags)
-{
- const struct {
- struct list_head *list;
- unsigned int bit;
- } phases[] = {
- { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
- { NULL, 0 },
- }, *phase;
- unsigned long count = 0;
-
- /*
- * As we may completely rewrite the (un)bound list whilst unbinding
- * (due to retiring requests) we have to strictly process only
- * one element of the list at the time, and recheck the list
- * on every iteration.
- *
- * In particular, we must hold a reference whilst removing the
- * object as we may end up waiting for and/or retiring the objects.
- * This might release the final reference (held by the active list)
- * and result in the object being freed from under us. This is
- * similar to the precautions the eviction code must take whilst
- * removing objects.
- *
- * Also note that although these lists do not hold a reference to
- * the object we can safely grab one here: The final object
- * unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
- * object on the bound_list with a reference count equals 0.
- */
- for (phase = phases; phase->list; phase++) {
- struct list_head still_in_list;
-
- if ((flags & phase->bit) == 0)
- continue;
-
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(phase->list)) {
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma, *v;
-
- obj = list_first_entry(phase->list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (flags & I915_SHRINK_PURGEABLE &&
- !i915_gem_object_is_purgeable(obj))
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- /* For the unbound phase, this should be a no-op! */
- list_for_each_entry_safe(vma, v,
- &obj->vma_list, vma_link)
- if (i915_vma_unbind(vma))
- break;
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, phase->list);
- }
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
- i915_gem_evict_everything(dev_priv->dev);
- return i915_gem_shrink(dev_priv, LONG_MAX,
- I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@@ -2492,6 +2411,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
list_add_tail(&request->client_list,
&file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
+
+ request->pid = get_pid(task_pid(current));
}
trace_i915_gem_request_add(request);
@@ -2572,6 +2493,8 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
list_del(&request->list);
i915_gem_request_remove_from_client(request);
+ put_pid(request->pid);
+
i915_gem_request_unreference(request);
}
@@ -2744,7 +2667,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
*/
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- struct intel_ringbuffer *ringbuf;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
@@ -2755,23 +2677,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
trace_i915_gem_request_retire(request);
- /* This is one of the few common intersection points
- * between legacy ringbuffer submission and execlists:
- * we need to tell them apart in order to find the correct
- * ringbuffer to which the request belongs to.
- */
- if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
-
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
*/
- ringbuf->last_retired_head = request->postfix;
+ request->ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request);
}
@@ -3516,9 +3427,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
unsigned alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+ uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3530,6 +3441,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3568,7 +3482,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+ vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+ i915_gem_obj_lookup_or_create_vma(obj, vm);
+
if (IS_ERR(vma))
goto err_unpin;
@@ -3598,6 +3514,17 @@ search_free:
if (ret)
goto err_remove_node;
+ /* allocate before insert / bind */
+ if (vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+ VM_TO_TRACE_NAME(vma->vm));
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ goto err_remove_node;
+ }
+
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level,
flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
@@ -3768,7 +3695,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -3950,7 +3877,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined)
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
bool was_pin_display;
@@ -3986,7 +3914,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+ ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+ view->type == I915_GGTT_VIEW_NORMAL ?
+ PIN_MAPPABLE : 0);
if (ret)
goto err_unpin_display;
@@ -4014,9 +3944,11 @@ err_unpin_display:
}
void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- i915_gem_object_ggtt_unpin(obj);
+ i915_gem_object_ggtt_unpin_view(obj, view);
+
obj->pin_display = is_pin_display(obj);
}
@@ -4083,7 +4015,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -4165,12 +4097,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
return false;
}
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
+ uint32_t alignment,
+ uint64_t flags)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
@@ -4186,17 +4118,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return -EINVAL;
+
+ vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+ i915_gem_obj_to_vma(obj, vm);
+
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (i915_vma_misplaced(vma, alignment, flags)) {
+ unsigned long offset;
+ offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+ i915_gem_obj_offset(obj, vm);
WARN(vma->pin_count,
- "bo is already pinned with incorrect alignment:"
+ "bo is already pinned in %s with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset_view(obj, vm, view->type),
+ ggtt_view ? "ggtt" : "ppgtt",
+ offset,
alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
@@ -4210,8 +4154,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
- flags, view);
+ /* In true PPGTT, bind has possibly changed PDEs, which
+ * means we must do a context switch before the GPU can
+ * accurately read some of the VMAs.
+ */
+ vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+ flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
@@ -4237,7 +4185,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
- mappable = (vma->node.start + obj->base.size <=
+ mappable = (vma->node.start + fence_size <=
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
@@ -4252,16 +4200,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
return 0;
}
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ return i915_gem_object_do_pin(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+ alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ if (WARN_ONCE(!view, "no view specified"))
+ return -EINVAL;
+
+ return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ alignment, flags | PIN_GLOBAL);
+}
+
void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
BUG_ON(!vma);
- BUG_ON(vma->pin_count == 0);
- BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+ WARN_ON(vma->pin_count == 0);
+ WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
- if (--vma->pin_count == 0)
+ if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
obj->pin_mappable = false;
}
@@ -4382,7 +4355,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+ if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -4557,15 +4530,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->ggtt_view.type == view->type)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
return vma;
+ }
+ return NULL;
+}
+
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct i915_vma *vma;
+
+ if (WARN_ONCE(!view, "no view specified"))
+ return ERR_PTR(-EINVAL);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma;
return NULL;
}
@@ -4612,10 +4603,6 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_retire_requests(dev);
- /* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_gem_evict_everything(dev);
-
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
@@ -4986,18 +4973,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
- I915_WRITE(MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- }
-
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- /* Old X drivers will take 0-2 for front, back, depth buffers */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- dev_priv->fence_reg_start = 3;
-
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
@@ -5005,6 +4982,10 @@ i915_gem_load(struct drm_device *dev)
else
dev_priv->num_fence_regs = 8;
+ if (intel_vgpu_active(dev))
+ dev_priv->num_fence_regs =
+ I915_READ(vgtif_reg(avail_rs.fence_num));
+
/* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
i915_gem_restore_fences(dev);
@@ -5014,13 +4995,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
- dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.shrinker);
-
- dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- register_oom_notifier(&dev_priv->mm.oom_notifier);
+ i915_gem_shrinker_init(dev_priv);
i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
@@ -5112,106 +5087,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
- if (!mutex_is_locked(mutex))
- return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+/* All the new VM stuff */
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
- if (to_i915(dev)->mm.shrinker_no_lock_stealing)
- return false;
+ WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
- *unlock = false;
- } else
- *unlock = true;
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
+ return vma->node.start;
+ }
- return true;
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
+ return -1;
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- int count = 0;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- count++;
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long count;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return 0;
-
- count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
- if (obj->pages_pin_count == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!i915_gem_obj_is_pinned(obj) &&
- obj->pages_pin_count == num_vma_bound(obj))
- count += obj->base.size >> PAGE_SHIFT;
- }
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma->node.start;
- return count;
+ WARN(1, "global vma for this object not found.\n");
+ return -1;
}
-/* All the new VM stuff */
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm && vma->ggtt_view.type == view)
- return vma->node.start;
-
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
}
- WARN(1, "%s vma for this object not found.\n",
- i915_is_ggtt(vm) ? "global" : "ppgtt");
- return -1;
+
+ return false;
}
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm &&
- vma->ggtt_view.type == view &&
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5239,118 +5178,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
BUG_ON(list_empty(&o->vma_list));
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
if (vma->vm == vm)
return vma->node.size;
-
+ }
return 0;
}
-static unsigned long
-i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- unsigned long freed;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return SHRINK_STOP;
-
- freed = i915_gem_shrink(dev_priv,
- sc->nr_to_scan,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- return freed;
-}
-
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed_pages;
- bool was_interruptible;
- bool unlock;
-
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
- schedule_timeout_killable(1);
- if (fatal_signal_pending(current))
- return NOTIFY_DONE;
- }
- if (timeout == 0) {
- pr_err("Unable to purge GPU memory due lock contention.\n");
- return NOTIFY_DONE;
- }
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- freed_pages = i915_gem_shrink_all(dev_priv);
-
- dev_priv->mm.interruptible = was_interruptible;
-
- /* Because we may be allocating inside our own driver, we cannot
- * assert that there are no objects with pinned pages that are not
- * being pointed to by hardware.
- */
- unbound = bound = pinned = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- if (!obj->base.filp) /* not backed by a freeable object */
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- unbound += obj->base.size;
- }
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->base.filp)
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- bound += obj->base.size;
+ if (vma->pin_count > 0)
+ return true;
}
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- if (freed_pages || unbound || bound)
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed_pages << PAGE_SHIFT, pinned);
- if (unbound || bound)
- pr_err("%lu and %lu bytes still available in the "
- "bound and unbound GPU page lists.\n",
- bound, unbound);
-
- *(unsigned long *)ptr += freed_pages;
- return NOTIFY_DONE;
+ return false;
}
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == ggtt &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- return vma;
-
- return NULL;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8603bf48d3ee..f3e84c44d009 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- /* In execlists mode we will unreference the context when the execlist
- * queue is cleared and the requests destroyed.
- */
- if (i915.enable_execlists)
+ if (i915.enable_execlists) {
+ struct intel_context *ctx;
+
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ intel_lr_context_reset(dev, ctx);
+ }
+
return;
+ }
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
@@ -565,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring,
return ret;
}
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+ struct intel_context *from,
+ struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (to->remap_slice)
+ return false;
+
+ if (to->ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &to->ppgtt->pd_dirty_rings))
+ return true;
+ } else if (dev_priv->mm.aliasing_ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (INTEL_INFO(ring->dev)->gen < 8)
+ return true;
+
+ if (ring != &dev_priv->ring[RCS])
+ return true;
+
+ return false;
+}
+
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+ u32 hw_flags)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (!IS_GEN8(ring->dev))
+ return false;
+
+ if (ring != &dev_priv->ring[RCS])
+ return false;
+
+ if (hw_flags & MI_RESTORE_INHIBIT)
+ return true;
+
+ return false;
+}
+
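/*
 * Summary (editorial, derived from the helpers above): for a context with
 * a full PPGTT, gens < 8 and non-render rings load the page directory
 * before MI_SET_CONTEXT (needs_pd_load_pre), while a gen8 render-ring
 * switch with MI_RESTORE_INHIBIT loads it afterwards (needs_pd_load_post);
 * the WARN_ON in do_switch() below checks that the two are never emitted
 * for the same switch.
 */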
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
{
@@ -580,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring,
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && !to->remap_slice)
+ if (should_skip_switch(ring, from, to))
return 0;
/* Trying to pin first makes error handling easier. */
@@ -598,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring,
*/
from = ring->last_context;
- if (to->ppgtt) {
+ if (needs_pd_load_pre(ring, to)) {
+ /* Older GENs and non-render rings still want the load first,
+ * "PP_DCLV followed by PP_DIR_BASE register through Load
+ * Register Immediate commands in Ring Buffer before submitting
+ * a context." */
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
+
+ /* Doing a PD load always reloads the page dirs */
+ clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
}
if (ring != &dev_priv->ring[RCS]) {
@@ -633,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
}
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
+ /* NB: If we inhibit the restore, the context is not allowed to
+ * die because future work may end up depending on valid address
+ * space. This means we must enforce that a page table load
+ * occurs in this case. */
+ } else if (to->ppgtt &&
+ test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+ hw_flags |= MI_FORCE_RESTORE;
+
+ /* We should never emit switch_mm more than once */
+ WARN_ON(needs_pd_load_pre(ring, to) &&
+ needs_pd_load_post(ring, to, hw_flags));
ret = mi_set_context(ring, to, hw_flags);
if (ret)
goto unpin_out;
+ /* GEN8 does *not* require an explicit reload if the PDPs have been
+ * set up, and we do not wish to move them.
+ */
+ if (needs_pd_load_post(ring, to, hw_flags)) {
+ trace_switch_mm(ring, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ /* The hardware context switch is emitted, but we haven't
+ * actually changed the state - so it's probably safe to bail
+ * here. Still, let the user know something dangerous has
+ * happened.
+ */
+ if (ret) {
+ DRM_ERROR("Failed to change address space on context switch\n");
+ goto unpin_out;
+ }
+ }
+
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
@@ -677,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
i915_gem_context_unreference(from);
}
- uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ uninitialized = !to->legacy_hw_ctx.initialized;
to->legacy_hw_ctx.initialized = true;
done:
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e3a49d94da3a..d09e35ed9c9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
*
* This function is used by the object/vma binding code.
*
+ * Since this function is only used to free up virtual address space it only
+ * ignores pinned vmas, and not object where the backing storage itself is
+ * pinned. Hence obj->pages_pin_count does not protect against eviction.
+ *
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 38a742532c4f..a3190e793ed4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -251,7 +251,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
- !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -337,6 +336,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
return 0;
}
+static void
+clflush_write32(void *addr, uint32_t value)
+{
+ /* This is not a fast path, so KISS. */
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+ *(uint32_t *)addr = value;
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+}
+
+static int
+relocate_entry_clflush(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t page_offset = offset_in_page(reloc->offset);
+ uint64_t delta = (int)reloc->delta + target_offset;
+ char *vaddr;
+ int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
+ clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+ if (page_offset == 0) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ }
+
+ clflush_write32(vaddr + page_offset, upper_32_bits(delta));
+ }
+
+ kunmap_atomic(vaddr);
+
+ return 0;
+}
+
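The flush-write-flush bracket in clflush_write32() above is what keeps the relocation write coherent without an LLC: the first flush evicts any stale cacheline before the store, and the second pushes the new value out to memory where the GPU can observe it. A minimal x86 userspace sketch of the same pattern, using the _mm_clflush intrinsic rather than drm_clflush_virt_range() (which additionally handles cacheline alignment and memory barriers); illustrative only, not i915 code:

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

static void flushed_write32(void *addr, uint32_t value)
{
        _mm_clflush(addr);                  /* evict any stale cacheline */
        *(volatile uint32_t *)addr = value; /* perform the write */
        _mm_clflush(addr);                  /* push the value to memory */
}

int main(void)
{
        uint32_t slot = 0;
        flushed_write32(&slot, 0xdeadbeef);
        printf("0x%x\n", slot);             /* 0xdeadbeef */
        return 0;
}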
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
@@ -426,8 +470,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(obj))
ret = relocate_entry_cpu(obj, reloc, target_offset);
- else
+ else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
+ else if (cpu_has_clflush)
+ ret = relocate_entry_clflush(obj, reloc, target_offset);
+ else {
+ WARN_ONCE(1, "Impossible case in relocation handling\n");
+ ret = -ENODEV;
+ }
if (ret)
return ret;
@@ -525,6 +575,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
return ret;
}
+static bool only_mappable_for_reloc(unsigned int flags)
+{
+ return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
+ __EXEC_OBJECT_NEEDS_MAP;
+}
+
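The mask-and-compare test in only_mappable_for_reloc() above is the standard idiom for "exactly this subset of flags": (flags & (A | B)) == B holds only when B is set and A is clear. A standalone sketch with made-up flag values, not the real i915 bit definitions:

#include <stdio.h>

#define NEEDS_FENCE (1u << 0) /* hypothetical stand-in for EXEC_OBJECT_NEEDS_FENCE */
#define NEEDS_MAP   (1u << 1) /* hypothetical stand-in for __EXEC_OBJECT_NEEDS_MAP */

static int only_mappable(unsigned int flags)
{
        return (flags & (NEEDS_FENCE | NEEDS_MAP)) == NEEDS_MAP;
}

int main(void)
{
        printf("%d\n", only_mappable(NEEDS_MAP));               /* 1 */
        printf("%d\n", only_mappable(NEEDS_MAP | NEEDS_FENCE)); /* 0 */
        printf("%d\n", only_mappable(0));                       /* 0 */
        return 0;
}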
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
@@ -536,14 +592,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
int ret;
flags = 0;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_GLOBAL | PIN_MAPPABLE;
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
- flags |= PIN_GLOBAL;
- if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
- flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ if (!drm_mm_node_allocated(&vma->node)) {
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+ flags |= PIN_GLOBAL | PIN_MAPPABLE;
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+ flags |= PIN_GLOBAL;
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+ flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ }
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+ if ((ret == -ENOSPC || ret == -E2BIG) &&
+ only_mappable_for_reloc(entry->flags))
+ ret = i915_gem_object_pin(obj, vma->vm,
+ entry->alignment,
+ flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
if (ret)
return ret;
@@ -605,13 +668,14 @@ eb_vma_misplaced(struct i915_vma *vma)
vma->node.start & (entry->alignment - 1))
return true;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
- return true;
-
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
+ /* avoid costly ping-pong once a batch bo ended up non-mappable */
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+ return !only_mappable_for_reloc(entry->flags);
+
return false;
}
@@ -971,7 +1035,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring);
+ intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1076,16 +1140,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
u32 batch_len,
- bool is_master,
- u32 *flags)
+ bool is_master)
{
struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
struct drm_i915_gem_object *shadow_batch_obj;
- bool need_reloc = false;
+ struct i915_vma *vma;
int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
- batch_obj->base.size);
+ PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
@@ -1095,40 +1158,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
batch_start_offset,
batch_len,
is_master);
- if (ret) {
- if (ret == -EACCES)
- return batch_obj;
- } else {
- struct i915_vma *vma;
+ if (ret)
+ goto err;
- memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+ ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+ if (ret)
+ goto err;
- vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
- vma->exec_entry = shadow_exec_entry;
- vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
- drm_gem_object_reference(&shadow_batch_obj->base);
- i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
- list_add_tail(&vma->exec_list, &eb->vmas);
+ memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
- shadow_batch_obj->base.pending_read_domains =
- batch_obj->base.pending_read_domains;
+ vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+ vma->exec_entry = shadow_exec_entry;
+ vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+ drm_gem_object_reference(&shadow_batch_obj->base);
+ list_add_tail(&vma->exec_list, &eb->vmas);
- /*
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically
- * don't want that set when the command parser is
- * enabled.
- *
- * FIXME: with aliasing ppgtt, buffers that should only
- * be in ggtt still end up in the aliasing ppgtt. remove
- * this check when that is fixed.
- */
- if (USES_FULL_PPGTT(dev))
- *flags |= I915_DISPATCH_SECURE;
- }
+ shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+
+ return shadow_batch_obj;
- return ret ? ERR_PTR(ret) : shadow_batch_obj;
+err:
+ if (ret == -EACCES) /* unhandled chained batch */
+ return batch_obj;
+ else
+ return ERR_PTR(ret);
}
int
@@ -1138,7 +1191,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags)
+ u64 exec_start, u32 dispatch_flags)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1198,6 +1251,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ret)
goto error;
+ if (ctx->ppgtt)
+ WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+ "%s didn't clear reload\n", ring->name);
+ else if (dev_priv->mm.aliasing_ppgtt)
+ WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
+ (1<<ring->id), "%s didn't clear reload\n", ring->name);
+
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
@@ -1266,19 +1326,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
- flags);
+ dispatch_flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
- flags);
+ dispatch_flags);
if (ret)
return ret;
}
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1353,7 +1413,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset;
- u32 flags;
+ u32 dispatch_flags;
int ret;
bool need_relocs;
@@ -1364,15 +1424,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
- flags = 0;
+ dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (!file->is_master || !capable(CAP_SYS_ADMIN))
return -EPERM;
- flags |= I915_DISPATCH_SECURE;
+ dispatch_flags |= I915_DISPATCH_SECURE;
}
if (args->flags & I915_EXEC_IS_PINNED)
- flags |= I915_DISPATCH_PINNED;
+ dispatch_flags |= I915_DISPATCH_PINNED;
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1494,12 +1554,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
args->batch_len,
- file->is_master,
- &flags);
+ file->is_master);
if (IS_ERR(batch_obj)) {
ret = PTR_ERR(batch_obj);
goto err;
}
+
+ /*
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically
+ * don't want that set when the command parser is
+ * enabled.
+ *
+ * FIXME: with aliasing ppgtt, buffers that should only
+ * be in ggtt still end up in the aliasing ppgtt. remove
+ * this check when that is fixed.
+ */
+ if (USES_FULL_PPGTT(dev))
+ dispatch_flags |= I915_DISPATCH_SECURE;
+
+ exec_start = 0;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1507,14 +1582,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
- if (flags & I915_DISPATCH_SECURE) {
+ if (dispatch_flags & I915_DISPATCH_SECURE) {
/*
* So on first glance it looks freaky that we pin the batch here
* outside of the reservation loop. But:
* - The batch is already pinned into the relevant ppgtt, so we
* already have the backing storage fully allocated.
* - No other BO uses the global gtt (well contexts, but meh),
- * so we don't really have issues with mutliple objects not
+ * so we don't really have issues with multiple objects not
* fitting due to fragmentation.
* So this is actually safe.
*/
@@ -1527,7 +1602,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
exec_start += i915_gem_obj_offset(batch_obj, vm);
ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
- &eb->vmas, batch_obj, exec_start, flags);
+ &eb->vmas, batch_obj, exec_start,
+ dispatch_flags);
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1535,7 +1611,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* needs to be adjusted to also track the ggtt batch vma properly as
* active.
*/
- if (flags & I915_DISPATCH_SECURE)
+ if (dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
err:
/* the request owns the ref now */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dccdc8aad2e2..0239fbff7bf7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -27,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -66,8 +67,9 @@
* i915_ggtt_view_type and struct i915_ggtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _view suffix. They take the struct i915_ggtt_view parameter
- * encapsulating all metadata required to implement a view.
+ * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
* globally const i915_ggtt_view_normal singleton instance exists. All old core
@@ -91,6 +93,9 @@
*/
const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_rotated = {
+ .type = I915_GGTT_VIEW_ROTATED
+};
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -103,6 +108,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+ if (intel_vgpu_active(dev))
+ has_full_ppgtt = false; /* emulation is too hard */
+
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
@@ -138,17 +146,16 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return has_aliasing_ppgtt ? 1 : 0;
}
-
static void ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
-static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
{
- gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
switch (level) {
@@ -166,11 +173,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
- gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
@@ -179,11 +186,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
return pde;
}
-static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -201,11 +208,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -225,11 +232,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags)
+static gen6_pte_t byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -241,11 +248,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -254,11 +261,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -275,6 +282,162 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+#define i915_dma_unmap_single(px, dev) \
+ __i915_dma_unmap_single((px)->daddr, dev)
+
+static inline void __i915_dma_unmap_single(dma_addr_t daddr,
+ struct drm_device *dev)
+{
+ struct device *device = &dev->pdev->dev;
+
+ dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+}
+
+/**
+ * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
+ * @px: Page table/dir/etc to get a DMA map for
+ * @dev: drm device
+ *
+ * Page table allocations are unified across all gens. They always require a
+ * single 4k allocation, as well as a DMA mapping. If we keep the structs
+ * symmetric here, the simple macro covers us for every page table type.
+ *
+ * Return: 0 on success.
+ */
+#define i915_dma_map_single(px, dev) \
+ i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+
+static inline int i915_dma_map_page_single(struct page *page,
+ struct drm_device *dev,
+ dma_addr_t *daddr)
+{
+ struct device *device = &dev->pdev->dev;
+
+ *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device, *daddr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+ struct drm_device *dev)
+{
+ if (WARN_ON(!pt->page))
+ return;
+
+ i915_dma_unmap_single(pt, dev);
+ __free_page(pt->page);
+ kfree(pt->used_ptes);
+ kfree(pt);
+}
+
+static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+{
+ struct i915_page_table_entry *pt;
+ const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+ GEN8_PTES : GEN6_PTES;
+ int ret = -ENOMEM;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ return ERR_PTR(-ENOMEM);
+
+ pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+ GFP_KERNEL);
+
+ if (!pt->used_ptes)
+ goto fail_bitmap;
+
+ pt->page = alloc_page(GFP_KERNEL);
+ if (!pt->page)
+ goto fail_page;
+
+ ret = i915_dma_map_single(pt, dev);
+ if (ret)
+ goto fail_dma;
+
+ return pt;
+
+fail_dma:
+ __free_page(pt->page);
+fail_page:
+ kfree(pt->used_ptes);
+fail_bitmap:
+ kfree(pt);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * alloc_pt_range() - Allocate multiple page tables
+ * @pd: The page directory which will have at least @count entries
+ * available to point to the allocated page tables.
+ * @pde: First page directory entry for which we are allocating.
+ * @count: Number of pages to allocate.
+ * @dev: DRM device.
+ *
+ * Allocates multiple page table pages and sets the appropriate entries in the
+ * page directory structure. The function cleans up after itself on any
+ * failure.
+ *
+ * Return: 0 if allocation succeeded.
+ */
+static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
+ struct drm_device *dev)
+{
+ int i, ret;
+
+ /* 512 is the maximum number of page tables per page directory on any platform. */
+ if (WARN_ON(pde + count > I915_PDES))
+ return -EINVAL;
+
+ for (i = pde; i < pde + count; i++) {
+ struct i915_page_table_entry *pt = alloc_pt_single(dev);
+
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto err_out;
+ }
+ WARN(pd->page_table[i],
+ "Leaking page directory entry %d (%p)\n",
+ i, pd->page_table[i]);
+ pd->page_table[i] = pt;
+ }
+
+ return 0;
+
+err_out:
+ while (i-- > pde)
+ unmap_and_free_pt(pd->page_table[i], dev);
+ return ret;
+}
+
+static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+{
+ if (pd->page) {
+ __free_page(pd->page);
+ kfree(pd);
+ }
+}
+
+static struct i915_page_directory_entry *alloc_pd_single(void)
+{
+ struct i915_page_directory_entry *pd;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pd->page) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
+
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val)
@@ -304,10 +467,10 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int i, ret;
/* bit of a hack to find the actual last used pd */
- int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+ int used_pd = ppgtt->num_pd_entries / I915_PDES;
for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pd_dma_addr[i];
+ dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
@@ -323,7 +486,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen8_pte_t *pt_vaddr, scratch_pte;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -334,11 +497,28 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+ struct i915_page_directory_entry *pd;
+ struct i915_page_table_entry *pt;
+ struct page *page_table;
+
+ if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+ continue;
+
+ pd = ppgtt->pdp.page_directory[pdpe];
+
+ if (WARN_ON(!pd->page_table[pde]))
+ continue;
+
+ pt = pd->page_table[pde];
+
+ if (WARN_ON(!pt->page))
+ continue;
+
+ page_table = pt->page;
last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES_PER_PAGE)
- last_pte = GEN8_PTES_PER_PAGE;
+ if (last_pte > GEN8_PTES)
+ last_pte = GEN8_PTES;
pt_vaddr = kmap_atomic(page_table);
@@ -352,7 +532,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
pte = 0;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
@@ -366,7 +546,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr;
+ gen8_pte_t *pt_vaddr;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -375,21 +555,26 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+ if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
break;
- if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+ if (pt_vaddr == NULL) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+ struct i915_page_table_entry *pt = pd->page_table[pde];
+ struct page *page_table = pt->page;
+
+ pt_vaddr = kmap_atomic(page_table);
+ }
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
- if (++pte == GEN8_PTES_PER_PAGE) {
+ if (++pte == GEN8_PTES) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
@@ -403,29 +588,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct page **pt_pages)
+static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
{
int i;
- if (pt_pages == NULL)
+ if (!pd->page)
return;
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
- if (pt_pages[i])
- __free_pages(pt_pages[i], 0);
+ for (i = 0; i < I915_PDES; i++) {
+ if (WARN_ON(!pd->page_table[i]))
+ continue;
+
+ unmap_and_free_pt(pd->page_table[i], dev);
+ pd->page_table[i] = NULL;
+ }
}
-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
- kfree(ppgtt->gen8_pt_pages[i]);
- kfree(ppgtt->gen8_pt_dma_addr[i]);
- }
+ if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+ continue;
- __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+ gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+ unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+ }
}
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -436,14 +625,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
/* TODO: In the future we'll support sparse mappings, so this
* will have to change. */
- if (!ppgtt->pd_dma_addr[i])
+ if (!ppgtt->pdp.page_directory[i]->daddr)
continue;
- pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+ pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ for (j = 0; j < I915_PDES; j++) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+ struct i915_page_table_entry *pt;
+ dma_addr_t addr;
+
+ if (WARN_ON(!pd->page_table[j]))
+ continue;
+
+ pt = pd->page_table[j];
+ addr = pt->daddr;
+
if (addr)
pci_unmap_page(hwdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -460,86 +658,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_free(ppgtt);
}
-static struct page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
- struct page **pt_pages;
- int i;
-
- pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
- if (!pt_pages)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
- pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!pt_pages[i])
- goto bail;
- }
-
- return pt_pages;
-
-bail:
- gen8_free_page_tables(pt_pages);
- kfree(pt_pages);
- return ERR_PTR(-ENOMEM);
-}
-
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
-{
- struct page **pt_pages[GEN8_LEGACY_PDPS];
int i, ret;
- for (i = 0; i < max_pdp; i++) {
- pt_pages[i] = __gen8_alloc_page_tables();
- if (IS_ERR(pt_pages[i])) {
- ret = PTR_ERR(pt_pages[i]);
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
+ 0, I915_PDES, ppgtt->base.dev);
+ if (ret)
goto unwind_out;
- }
}
- /* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
- * "atomic" - for cleanup purposes.
- */
- for (i = 0; i < max_pdp; i++)
- ppgtt->gen8_pt_pages[i] = pt_pages[i];
-
return 0;
unwind_out:
- while (i--) {
- gen8_free_page_tables(pt_pages[i]);
- kfree(pt_pages[i]);
- }
+ while (i--)
+ gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
- return ret;
+ return -ENOMEM;
}
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
{
int i;
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->gen8_pt_dma_addr[i])
- return -ENOMEM;
+ for (i = 0; i < max_pdp; i++) {
+ ppgtt->pdp.page_directory[i] = alloc_pd_single();
+ if (IS_ERR(ppgtt->pdp.page_directory[i]))
+ goto unwind_out;
}
- return 0;
-}
+ ppgtt->num_pd_pages = max_pdp;
+ BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
-{
- ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
- if (!ppgtt->pd_pages)
- return -ENOMEM;
+ return 0;
- ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
- BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+unwind_out:
+ while (i--)
+ unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
- return 0;
+ return -ENOMEM;
}
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -551,18 +710,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
- if (ret) {
- __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
- return ret;
- }
+ ret = gen8_ppgtt_allocate_page_tables(ppgtt);
+ if (ret)
+ goto err_out;
- ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+ ppgtt->num_pd_entries = max_pdp * I915_PDES;
- ret = gen8_ppgtt_allocate_dma(ppgtt);
- if (ret)
- gen8_ppgtt_free(ppgtt);
+ return 0;
+err_out:
+ gen8_ppgtt_free(ppgtt);
return ret;
}
@@ -573,14 +730,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
int ret;
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
- &ppgtt->pd_pages[pd], 0,
+ ppgtt->pdp.page_directory[pd]->page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
if (ret)
return ret;
- ppgtt->pd_dma_addr[pd] = pd_addr;
+ ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
return 0;
}
@@ -590,22 +747,23 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
const int pt)
{
dma_addr_t pt_addr;
- struct page *p;
+ struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
+ struct i915_page_table_entry *ptab = pdir->page_table[pt];
+ struct page *p = ptab->page;
int ret;
- p = ppgtt->gen8_pt_pages[pd][pt];
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
if (ret)
return ret;
- ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+ ptab->daddr = pt_addr;
return 0;
}
-/**
+/*
 * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers,
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b address
 * space.
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ const int min_pt_pages = I915_PDES * max_pdp;
int i, j, ret;
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
- /* 1. Do all our allocations for page directories and page tables. */
- ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ /* 1. Do all our allocations for page directories and page tables.
+ * We allocate more than was asked so that we can point the unused parts
+ * to valid entries that point to the scratch page. Dynamic page tables
+ * will fix this eventually.
+ */
+ ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
if (ret)
return ret;
/*
* 2. Create DMA mappings for the page directories and page tables.
*/
- for (i = 0; i < max_pdp; i++) {
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
if (ret)
goto bail;
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ for (j = 0; j < I915_PDES; j++) {
ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
if (ret)
goto bail;
@@ -652,11 +814,13 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again.
*/
- for (i = 0; i < max_pdp; i++) {
- gen8_ppgtt_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+ gen8_pde_t *pd_vaddr;
+ pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
+ for (j = 0; j < I915_PDES; j++) {
+ struct i915_page_table_entry *pt = pd->page_table[j];
+ dma_addr_t addr = pt->daddr;
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
@@ -670,9 +834,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ /* This is the area that we advertise as usable for the caller */
+ ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
+
+ /* Point all ptes at the valid scratch page, including those above the requested space */
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
+ true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
@@ -691,22 +860,23 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
- gen6_gtt_pte_t __iomem *pd_addr;
- gen6_gtt_pte_t scratch_pte;
+ gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
uint32_t pd_entry;
int pte, pde;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
- pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+ pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
- ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+ ppgtt->pd.pd_offset,
+ ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
u32 expected;
- gen6_gtt_pte_t *pt_vaddr;
- dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+ gen6_pte_t *pt_vaddr;
+ dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
pd_entry = readl(pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -717,10 +887,10 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
- for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+ for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
- (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+ (pde * PAGE_SIZE * GEN6_PTES) +
(pte * PAGE_SIZE);
int i;
bool found = false;
@@ -743,33 +913,43 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
}
-static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
+/* Write the page directory entry at index @pde of @pd so that it points at
+ * the page table @pt */
+static void gen6_write_pde(struct i915_page_directory_entry *pd,
+ const int pde, struct i915_page_table_entry *pt)
{
- struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
- gen6_gtt_pte_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
+ /* Caller needs to make sure the write completes if necessary */
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(pd, struct i915_hw_ppgtt, pd);
+ u32 pd_entry;
- WARN_ON(ppgtt->pd_offset & 0x3f);
- pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
- ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+ pd_entry |= GEN6_PDE_VALID;
- pt_addr = ppgtt->pt_dma_addr[i];
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
+ writel(pd_entry, ppgtt->pd_addr + pde);
+}
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
+/* Write out the page directory entries for every page table found in the
+ * given range. */
+static void gen6_write_page_range(struct drm_i915_private *dev_priv,
+ struct i915_page_directory_entry *pd,
+ uint32_t start, uint32_t length)
+{
+ struct i915_page_table_entry *pt;
+ uint32_t pde, temp;
+
+ gen6_for_each_pde(pt, pd, start, length, temp, pde)
+ gen6_write_pde(pd, pde, pt);
+
+ /* Make sure write is complete before other code can use this page
+ * table. Also required for WC mapped PTEs */
+ readl(dev_priv->gtt.gsm);
}
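The readl() that ends gen6_write_page_range() is a posting read: the PDEs are streamed out through a write-combined mapping of the GTT, and reading anything back through that mapping flushes the WC buffers so the hardware is guaranteed to observe the writes before the page table is used. The shape of the pattern, sketched with toy stand-ins for writel()/readl() over plain memory (the real code operates on the ioremapped dev_priv->gtt.gsm):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void fake_writel(uint32_t val, volatile uint32_t *addr) { *addr = val; }
static uint32_t fake_readl(volatile uint32_t *addr) { return *addr; }

int main(void)
{
        /* stand-in for the write-combined GTT mapping */
        volatile uint32_t *gsm = calloc(512, sizeof(uint32_t));
        int pde;

        if (!gsm)
                return 1;
        for (pde = 0; pde < 4; pde++)
                fake_writel((uint32_t)(pde << 12) | 1 /* valid bit */, gsm + pde);

        (void)fake_readl(gsm); /* posting read: flush WC buffers before use */
        printf("pde0 = 0x%x\n", (unsigned)gsm[0]); /* 0x1 */
        free((void *)gsm);
        return 0;
}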
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- BUG_ON(ppgtt->pd_offset & 0x3f);
+ BUG_ON(ppgtt->pd.pd_offset & 0x3f);
- return (ppgtt->pd_offset / 64) << 16;
+ return (ppgtt->pd.pd_offset / 64) << 16;
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -797,6 +977,16 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
+static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ return 0;
+}
+
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring)
{
@@ -908,21 +1098,21 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen6_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
- if (last_pte > I915_PPGTT_PT_ENTRIES)
- last_pte = I915_PPGTT_PT_ENTRIES;
+ if (last_pte > GEN6_PTES)
+ last_pte = GEN6_PTES;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
@@ -942,22 +1132,22 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr;
+ gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned act_pte = first_entry % GEN6_PTES;
struct sg_page_iter sg_iter;
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
- if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+ if (++act_pte == GEN6_PTES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
act_pt++;
@@ -968,26 +1158,134 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+/* PDE TLBs are a pain to invalidate pre GEN8. It requires a context reload. If we
+ * are switching between contexts with the same LRCA, we also must do a force
+ * restore.
+ */
+static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ /* If the vm changed, every ring must reload the page directory */
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
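mark_tlbs_dirty() above and the test_and_clear_bit() on pd_dirty_rings in do_switch() form a simple bitmask handshake: any change to the page directory sets a bit for every ring, and each ring clears its own bit once it has emitted the PD reload (forcing MI_FORCE_RESTORE where needed). A single-threaded toy model of that handshake, assuming a made-up 3-ring device; the kernel uses atomic bitops since rings are serviced concurrently:

#include <stdio.h>

#define NUM_RINGS 3

static unsigned long pd_dirty_rings;

static void mark_all_tlbs_dirty(void)
{
        pd_dirty_rings = (1UL << NUM_RINGS) - 1; /* every ring must reload */
}

static int ring_needs_pd_reload(int ring)
{
        if (pd_dirty_rings & (1UL << ring)) {
                pd_dirty_rings &= ~(1UL << ring); /* test_and_clear_bit() */
                return 1;
        }
        return 0;
}

int main(void)
{
        mark_all_tlbs_dirty();
        printf("ring0 reload: %d\n", ring_needs_pd_reload(0)); /* 1 */
        printf("ring0 again:  %d\n", ring_needs_pd_reload(0)); /* 0 */
        printf("ring2 reload: %d\n", ring_needs_pd_reload(2)); /* 1 */
        return 0;
}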
+static void gen6_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table_entry *pt)
{
+ gen6_pte_t *pt_vaddr, scratch_pte;
int i;
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
+ WARN_ON(vm->scratch.addr == 0);
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr,
+ I915_CACHE_LLC, true, 0);
+
+ pt_vaddr = kmap_atomic(pt->page);
+
+ for (i = 0; i < GEN6_PTES; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+}
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ DECLARE_BITMAP(new_page_tables, I915_PDES);
+ struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_page_table_entry *pt;
+ const uint32_t start_save = start, length_save = length;
+ uint32_t pde, temp;
+ int ret;
+
+ WARN_ON(upper_32_bits(start));
+
+ bitmap_zero(new_page_tables, I915_PDES);
+
+ /* The allocation is done in two stages so that we can bail out with
+ * minimal amount of pain. The first stage finds new page tables that
+ * need allocation. The second stage marks the used ptes within the page
+ * tables.
+ */
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ if (pt != ppgtt->scratch_pt) {
+ WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
+ continue;
+ }
+
+ /* We've already allocated a page table */
+ WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
+
+ pt = alloc_pt_single(dev);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto unwind_out;
+ }
+
+ gen6_initialize_pt(vm, pt);
+
+ ppgtt->pd.page_table[pde] = pt;
+ set_bit(pde, new_page_tables);
+ trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
}
+
+ start = start_save;
+ length = length_save;
+
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
+
+ bitmap_zero(tmp_bitmap, GEN6_PTES);
+ bitmap_set(tmp_bitmap, gen6_pte_index(start),
+ gen6_pte_count(start, length));
+
+ if (test_and_clear_bit(pde, new_page_tables))
+ gen6_write_pde(&ppgtt->pd, pde, pt);
+
+ trace_i915_page_table_entry_map(vm, pde, pt,
+ gen6_pte_index(start),
+ gen6_pte_count(start, length),
+ GEN6_PTES);
+ bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
+ GEN6_PTES);
+ }
+
+ WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
+
+ /* Make sure write is complete before other code can use this page
+ * table. Also required for WC mapped PTEs */
+ readl(dev_priv->gtt.gsm);
+
+ mark_tlbs_dirty(ppgtt);
+ return 0;
+
+unwind_out:
+ for_each_set_bit(pde, new_page_tables, I915_PDES) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
+
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+ unmap_and_free_pt(pt, vm->dev);
+ }
+
+ mark_tlbs_dirty(ppgtt);
+ return ret;
}
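The allocate-then-commit structure of gen6_alloc_va_range() generalizes: stage one allocates only the page tables that are missing and records them in a bitmap of new entries; stage two commits by writing PDEs and marking ptes in use. On error the unwind path frees only what the bitmap records, so tables that predate the call are never torn down. A self-contained toy of the same pattern (made-up table count, not i915 code):

#include <stdio.h>
#include <stdlib.h>

#define NPDES 8

int main(void)
{
        void *pt[NPDES] = { NULL };
        unsigned long new_tables = 0;
        int pde, fail = 0;

        /* Stage 1: allocate whatever is missing, remembering what is new. */
        for (pde = 2; pde <= 5; pde++) {
                if (pt[pde])
                        continue; /* existed before this call: leave alone */
                pt[pde] = malloc(4096);
                if (!pt[pde]) {
                        fail = 1;
                        break;
                }
                new_tables |= 1UL << pde;
        }

        if (fail) {
                /* Unwind only this call's allocations. */
                for (pde = 0; pde < NPDES; pde++)
                        if (new_tables & (1UL << pde)) {
                                free(pt[pde]);
                                pt[pde] = NULL;
                        }
                return 1;
        }

        /* Stage 2: commit - write PDEs, mark ptes used (elided here). */
        printf("newly allocated mask: 0x%lx\n", new_tables); /* 0x3c */

        for (pde = 0; pde < NPDES; pde++)
                free(pt[pde]);
        return 0;
}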
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
+
+ if (pt != ppgtt->scratch_pt)
+ unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+ }
+
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ unmap_and_free_pd(&ppgtt->pd);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -997,7 +1295,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_unmap_pages(ppgtt);
gen6_ppgtt_free(ppgtt);
}
@@ -1013,6 +1310,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->scratch_pt))
+ return PTR_ERR(ppgtt->scratch_pt);
+
+ gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
@@ -1026,88 +1329,43 @@ alloc:
0, dev_priv->gtt.base.total,
0);
if (ret)
- return ret;
+ goto err_out;
retried = true;
goto alloc;
}
- if (ppgtt->node.start < dev_priv->gtt.mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
-
- ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
- return ret;
-}
-
-static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
- int i;
-
- ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
- GFP_KERNEL);
+ if (ret)
+ goto err_out;
- if (!ppgtt->pt_pages)
- return -ENOMEM;
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!ppgtt->pt_pages[i]) {
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
- }
+ if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->num_pd_entries = I915_PDES;
return 0;
+
+err_out:
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ return ret;
}
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
- int ret;
-
- ret = gen6_ppgtt_allocate_page_directories(ppgtt);
- if (ret)
- return ret;
-
- ret = gen6_ppgtt_allocate_page_tables(ppgtt);
- if (ret) {
- drm_mm_remove_node(&ppgtt->node);
- return ret;
- }
-
- ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr) {
- drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
-
- return 0;
+ return gen6_ppgtt_allocate_page_directories(ppgtt);
}
-static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
+ uint64_t start, uint64_t length)
{
- struct drm_device *dev = ppgtt->base.dev;
- int i;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
- gen6_ppgtt_unmap_pages(ppgtt);
- return -EIO;
- }
-
- ppgtt->pt_dma_addr[i] = pt_addr;
- }
+ struct i915_page_table_entry *unused;
+ uint32_t pde, temp;
- return 0;
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1123,40 +1381,57 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
} else
BUG();
+ if (intel_vgpu_active(dev))
+ ppgtt->switch_mm = vgpu_mm_switch;
+
ret = gen6_ppgtt_alloc(ppgtt);
if (ret)
return ret;
- ret = gen6_ppgtt_setup_page_tables(ppgtt);
- if (ret) {
- gen6_ppgtt_free(ppgtt);
- return ret;
+ if (aliasing) {
+ /* preallocate all pts */
+ ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+ ppgtt->base.dev);
+
+ if (ret) {
+ gen6_ppgtt_cleanup(&ppgtt->base);
+ return ret;
+ }
}
+ ppgtt->base.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
+ ppgtt->pd.pd_offset =
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+
+ if (aliasing)
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ else
+ gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
- gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd_offset << 10);
+ ppgtt->pd.pd_offset << 10);
return 0;
}
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
+ bool aliasing)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1164,7 +1439,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
- return gen6_ppgtt_init(ppgtt);
+ return gen6_ppgtt_init(ppgtt, aliasing);
else
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
@@ -1173,7 +1448,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- ret = __hw_ppgtt_init(dev, ppgtt);
+ ret = __hw_ppgtt_init(dev, ppgtt, false);
if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
@@ -1420,15 +1695,20 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- /* TODO: Perhaps it shouldn't be gen6 specific */
- if (i915_is_ggtt(vm)) {
- if (dev_priv->mm.aliasing_ppgtt)
- gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
- continue;
- }
+ if (USES_PPGTT(dev)) {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
+
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt,
+ base);
+
+ if (i915_is_ggtt(vm))
+ ppgtt = dev_priv->mm.aliasing_ppgtt;
- gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
+ gen6_write_page_range(dev_priv, &ppgtt->pd,
+ 0, ppgtt->base.total);
+ }
}
i915_ggtt_flush(dev_priv);
@@ -1447,7 +1727,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
-static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
@@ -1464,8 +1744,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen8_gtt_pte_t __iomem *gtt_entries =
- (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t __iomem *gtt_entries =
+ (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -1510,8 +1790,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen6_gtt_pte_t __iomem *gtt_entries =
- (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t __iomem *gtt_entries =
+ (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -1549,8 +1829,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t scratch_pte, __iomem *gtt_base =
+ (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1575,8 +1855,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1633,11 +1913,15 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ struct sg_table *pages = obj->pages;
/* Currently applicable only to VLV */
if (obj->gt_ro)
flags |= PTE_READ_ONLY;
+ if (i915_is_ggtt(vma->vm))
+ pages = vma->ggtt_view.pages;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1652,7 +1936,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
+ vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@@ -1663,8 +1947,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base,
- vma->ggtt_view.pages,
+ appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@@ -1753,6 +2036,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
/* Subtract the guard page ... */
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+
+ dev_priv->gtt.base.start = start;
+ dev_priv->gtt.base.total = end - start;
+
+ if (intel_vgpu_active(dev)) {
+ ret = intel_vgt_balloon(dev);
+ if (ret)
+ return ret;
+ }
+
if (!HAS_LLC(dev))
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
@@ -1772,9 +2065,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
}
- dev_priv->gtt.base.start = start;
- dev_priv->gtt.base.total = end - start;
-
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -1793,9 +2083,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
if (!ppgtt)
return -ENOMEM;
- ret = __hw_ppgtt_init(dev, ppgtt);
- if (ret != 0)
+ ret = __hw_ppgtt_init(dev, ppgtt, true);
+ if (ret) {
+ kfree(ppgtt);
return ret;
+ }
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
@@ -1826,6 +2118,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
}
if (drm_mm_initialized(&vm->mm)) {
+ if (intel_vgpu_active(dev))
+ intel_vgt_deballoon();
+
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
@@ -2078,7 +2373,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
@@ -2123,7 +2418,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
ret = ggtt_probe_common(dev, gtt_size);
@@ -2228,11 +2523,16 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+static struct i915_vma *
+__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view)
{
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct i915_vma *vma;
+
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -2241,10 +2541,11 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
- vma->ggtt_view = *view;
if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
+ vma->ggtt_view = *ggtt_view;
+
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
} else {
@@ -2253,6 +2554,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
} else {
BUG_ON(!i915_is_ggtt(vm));
+ vma->ggtt_view = *ggtt_view;
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
}
@@ -2265,38 +2567,170 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(!view))
+ return ERR_PTR(-EINVAL);
+
+ vma = i915_gem_obj_to_ggtt_view(obj, view);
+
+ if (IS_ERR(vma))
+ return vma;
+
if (!vma)
- vma = __i915_gem_vma_create(obj, vm, view);
+ vma = __i915_gem_vma_create(obj, ggtt, view);
return vma;
+
+}
+
+static void
+rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
+ struct sg_table *st)
+{
+ unsigned int column, row;
+ unsigned int src_idx;
+ struct scatterlist *sg = st->sgl;
+
+ st->nents = 0;
+
+ for (column = 0; column < width; column++) {
+ src_idx = width * (height - 1) + column;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /* We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only things we need are the DMA addresses.
+ */
+ sg_set_page(sg, NULL, PAGE_SIZE, 0);
+ sg_dma_address(sg) = in[src_idx];
+ sg_dma_len(sg) = PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= width;
+ }
+ }
+}
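The loop above reads the source page array column by column, starting each column at the bottom row and stepping one row up per entry - a 90 degree rotation of the page layout. A standalone sketch of the same index walk on plain integers (hypothetical userspace C, not part of the patch):

#include <stdio.h>

/* Mirror of the rotate_pages() index walk on a plain integer array. */
static void rotate_indices(const int *in, unsigned int width,
			   unsigned int height, int *out)
{
	unsigned int column, row, dst = 0;

	for (column = 0; column < width; column++) {
		/* Start at the bottom row of this column... */
		unsigned int src = width * (height - 1) + column;

		for (row = 0; row < height; row++) {
			out[dst++] = in[src];
			src -= width;	/* ...and walk one row up
					 * (unsigned wrap after the last
					 * row is never dereferenced). */
		}
	}
}

int main(void)
{
	int in[6] = { 0, 1, 2, 3, 4, 5 };	/* 3x2 grid, row-major */
	int out[6];
	unsigned int i;

	rotate_indices(in, 3, 2, out);
	for (i = 0; i < 6; i++)
		printf("%d ", out[i]);		/* prints: 3 0 4 1 5 2 */
	printf("\n");
	return 0;
}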
+
+static struct sg_table *
+intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+ unsigned long size, pages, rot_pages;
+ struct sg_page_iter sg_iter;
+ unsigned long i;
+ dma_addr_t *page_addr_list;
+ struct sg_table *st;
+ unsigned int tile_pitch, tile_height;
+ unsigned int width_pages, height_pages;
+ int ret = -ENOMEM;
+
+ pages = obj->base.size / PAGE_SIZE;
+
+ /* Calculate tiling geometry. */
+ tile_height = intel_tile_height(dev, rot_info->pixel_format,
+ rot_info->fb_modifier);
+ tile_pitch = PAGE_SIZE / tile_height;
+ width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
+ height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
+ rot_pages = width_pages * height_pages;
+ size = rot_pages * PAGE_SIZE;
+
+ /* Allocate a temporary list of source pages for random access. */
+ page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+ if (!page_addr_list)
+ return ERR_PTR(ret);
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ /* Populate source page list from the object. */
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
+ i++;
+ }
+
+ /* Rotate the pages. */
+ rotate_pages(page_addr_list, width_pages, height_pages, st);
+
+ DRM_DEBUG_KMS(
+ "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
+ size, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+
+ drm_free_large(page_addr_list);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ drm_free_large(page_addr_list);
+
+ DRM_DEBUG_KMS(
+ "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
+ size, ret, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+ return ERR_PTR(ret);
}
-static inline
-int i915_get_vma_pages(struct i915_vma *vma)
+static inline int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
+ int ret = 0;
+
if (vma->ggtt_view.pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->ggtt_view.pages = vma->obj->pages;
+ else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ vma->ggtt_view.pages =
+ intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
if (!vma->ggtt_view.pages) {
- DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+ DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
vma->ggtt_view.type);
- return -EINVAL;
+ ret = -EINVAL;
+ } else if (IS_ERR(vma->ggtt_view.pages)) {
+ ret = PTR_ERR(vma->ggtt_view.pages);
+ vma->ggtt_view.pages = NULL;
+ DRM_ERROR("Failed to get pages for GGTT view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
}
- return 0;
+ return ret;
}
/**
@@ -2312,10 +2746,12 @@ int i915_get_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
- int ret = i915_get_vma_pages(vma);
+ if (i915_is_ggtt(vma->vm)) {
+ int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
vma->bind_vma(vma, cache_level, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e377c7d27bd4..fc03c99317c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,13 +36,13 @@
struct drm_i915_file_private;
-typedef uint32_t gen6_gtt_pte_t;
-typedef uint64_t gen8_gtt_pte_t;
-typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+typedef uint32_t gen6_pte_t;
+typedef uint64_t gen8_pte_t;
+typedef uint64_t gen8_pde_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
@@ -51,9 +51,16 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PPGTT_PD_ENTRIES 512
-#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
+#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID (1 << 0)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
@@ -88,9 +95,8 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
-#define GEN8_LEGACY_PDPS 4
-#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
-#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_LEGACY_PDPES 4
+#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -111,15 +117,28 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED
+};
+
+struct intel_rotation_info {
+ unsigned int height;
+ unsigned int pitch;
+ uint32_t pixel_format;
+ uint64_t fb_modifier;
};
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
struct sg_table *pages;
+
+ union {
+ struct intel_rotation_info rotation_info;
+ };
};
extern const struct i915_ggtt_view i915_ggtt_view_normal;
+extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
@@ -187,6 +206,28 @@ struct i915_vma {
u32 flags);
};
+struct i915_page_table_entry {
+ struct page *page;
+ dma_addr_t daddr;
+
+ unsigned long *used_ptes;
+};
+
+struct i915_page_directory_entry {
+ struct page *page; /* NULL for GEN6-GEN7 */
+ union {
+ uint32_t pd_offset;
+ dma_addr_t daddr;
+ };
+
+ struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
+};
+
+struct i915_page_directory_pointer_entry {
+ /* struct page *page; */
+ struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+};
+
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
@@ -223,9 +264,12 @@ struct i915_address_space {
struct list_head inactive_list;
/* FIXME: Need a more generic return type */
- gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ gen6_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags); /* Create a valid PTE */
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -269,30 +313,90 @@ struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
+ unsigned long pd_dirty_rings;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
- struct page **pt_pages;
- struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
- };
- struct page *pd_pages;
- union {
- uint32_t pd_offset;
- dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
- };
- union {
- dma_addr_t *pt_dma_addr;
- dma_addr_t *gen8_pt_dma_addr[4];
+ struct i915_page_directory_pointer_entry pdp;
+ struct i915_page_directory_entry pd;
};
+ struct i915_page_table_entry *scratch_pt;
+
struct drm_i915_file_private *file_priv;
+ gen6_pte_t __iomem *pd_addr;
+
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
+/* gen6_for_each_pde() iterates over every PDE from start until start + length.
+ * If start and start + length are not perfectly divisible by the PDE span, the
+ * macro rounds down and up as needed. The macro modifies pde, start, and
+ * length. temp is scratch space. On gen6/7, start = 0 and length = 2G
+ * effectively iterates over every PDE in the system.
+ *
+ * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+ */
+#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
+ for (iter = gen6_pde_index(start); \
+ pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
+ temp = min_t(unsigned, temp, length), \
+ start += temp, length -= temp)
+
+static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
+{
+ const uint32_t mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/* Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
+static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
+ uint32_t pde_shift)
+{
+ const uint64_t mask = ~((1 << pde_shift) - 1);
+ uint64_t end;
+
+ WARN_ON(length == 0);
+ WARN_ON(offset_in_page(addr|length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
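The boundary clipping is easiest to see with concrete numbers. A hypothetical userspace replica of the two helpers, using the gen6 geometry (one PDE spans 4 MiB, i.e. 1024 PTEs):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		12
#define NUM_PTE(pde_shift)	(1u << ((pde_shift) - PAGE_SHIFT))

static uint32_t pte_index(uint64_t address, uint32_t pde_shift)
{
	return (address >> PAGE_SHIFT) & (NUM_PTE(pde_shift) - 1);
}

static uint32_t pte_count(uint64_t addr, uint64_t length, uint32_t pde_shift)
{
	const uint64_t mask = ~((1ull << pde_shift) - 1);
	uint64_t end = addr + length;

	/* Range crosses a PDE boundary: clip at the boundary. */
	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - pte_index(addr, pde_shift);

	return pte_index(end, pde_shift) - pte_index(addr, pde_shift);
}

int main(void)
{
	/* gen6: GEN6_PDE_SHIFT == 22, so one PDE covers 4 MiB / 1024 PTEs */
	printf("%u\n", pte_count(0x0, 0x3000, 22));	 /* 3 (pages 0..2) */
	printf("%u\n", pte_count(0x3ff000, 0x2000, 22)); /* 1 (clipped at 4 MiB) */
	return 0;
}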
+
+static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline uint32_t gen6_pte_index(uint32_t addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline uint32_t gen6_pde_index(uint32_t addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -321,4 +425,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+static inline bool
+i915_ggtt_view_equal(const struct i915_ggtt_view *a,
+ const struct i915_ggtt_view *b)
+{
+ if (WARN_ON(!a || !b))
+ return false;
+
+ return a->type == b->type;
+}
+
#endif
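The gen6_for_each_pde() macro above advances start to the next 4 MiB PDE boundary on each step and shrinks length by the clipped amount. A standalone sketch of the same loop shape with plain types (hypothetical userspace C, not part of the patch):

#include <stdio.h>

#define GEN6_PDE_SHIFT	22
#define I915_PDES	512
#define PDE_SPAN	(1ull << GEN6_PDE_SHIFT)	/* 4 MiB */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long long start = 0x3fe000, length = 0x800000, temp;
	unsigned int pde;

	for (pde = (start >> GEN6_PDE_SHIFT) & (I915_PDES - 1);
	     length > 0 && pde < I915_PDES;
	     pde++,
	     temp = ALIGN_UP(start + 1, PDE_SPAN) - start,
	     temp = MIN(temp, length),
	     start += temp, length -= temp)
		printf("pde %u: start=%#llx remaining=%#llx\n",
		       pde, start, length);

	/* Prints pde 0, 1 and 2: the 8 MiB range starting at 0x3fe000
	 * touches three 4 MiB PDE spans. */
	return 0;
}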
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
new file mode 100644
index 000000000000..f7929e769250
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/oom.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
+/**
+ * i915_gem_shrink - Shrink buffer object caches
+ * @dev_priv: i915 device
+ * @target: amount of memory to make available, in pages
+ * @flags: control flags for selecting cache types
+ *
+ * This function is the main interface to the shrinker. It will try to release
+ * up to @target pages of main memory backing storage from buffer objects.
+ * Selection of the specific caches can be done with @flags. This is e.g. useful
+ * when purgeable objects should be removed from caches preferentially.
+ *
+ * Note that it's not guaranteed that the released amount is actually available
+ * as free system memory - the pages might still be in use due to other reasons
+ * (like cpu mmaps) or the mm core has reused them before we could grab them.
+ * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
+ * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
+ *
+ * Also note that any kind of pinning (both per-vma address space pins and
+ * backing storage pins at the buffer object level) results in the shrinker code
+ * having to skip the object.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target, unsigned flags)
+{
+ const struct {
+ struct list_head *list;
+ unsigned int bit;
+ } phases[] = {
+ { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { NULL, 0 },
+ }, *phase;
+ unsigned long count = 0;
+
+ /*
+ * As we may completely rewrite the (un)bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+ * one element of the list at a time, and recheck the list
+ * on every iteration.
+ *
+ * In particular, we must hold a reference whilst removing the
+ * object as we may end up waiting for and/or retiring the objects.
+ * This might release the final reference (held by the active list)
+ * and result in the object being freed from under us. This is
+ * similar to the precautions the eviction code must take whilst
+ * removing objects.
+ *
+ * Also note that although these lists do not hold a reference to
+ * the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count of 0.
+ */
+ for (phase = phases; phase->list; phase++) {
+ struct list_head still_in_list;
+
+ if ((flags & phase->bit) == 0)
+ continue;
+
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(phase->list)) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma, *v;
+
+ obj = list_first_entry(phase->list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (flags & I915_SHRINK_PURGEABLE &&
+ obj->madv != I915_MADV_DONTNEED)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ /* For the unbound phase, this should be a no-op! */
+ list_for_each_entry_safe(vma, v,
+ &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, phase->list);
+ }
+
+ return count;
+}
+
+/**
+ * i915_gem_shrink_all - Shrink buffer object caches completely
+ * @dev_priv: i915 device
+ *
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
+ * caches completely. It also first waits for and retires all outstanding
+ * requests to also be able to release backing storage for active objects.
+ *
+ * This should only be used in code to intentionally quiesce the gpu or as a
+ * last-ditch effort when memory seems to have run out.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ i915_gem_evict_everything(dev_priv->dev);
+ return i915_gem_shrink(dev_priv, LONG_MAX,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
+}
+
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
+
+ *unlock = false;
+ } else
+ *unlock = true;
+
+ return true;
+}
+
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+
+ return count;
+}
+
+static unsigned long
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long count;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
+
+ count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ if (obj->pages_pin_count == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static unsigned long
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long freed;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ freed = i915_gem_shrink(dev_priv,
+ sc->nr_to_scan,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
+ if (freed < sc->nr_to_scan)
+ freed += i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return freed;
+}
+
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed_pages;
+ bool was_interruptible;
+ bool unlock;
+
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
+ schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due to lock contention.\n");
+ return NOTIFY_DONE;
+ }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ freed_pages = i915_gem_shrink_all(dev_priv);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ if (freed_pages || unbound || bound)
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed_pages << PAGE_SHIFT, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
+
+ *(unsigned long *)ptr += freed_pages;
+ return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_shrinker_init - Initialize i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function registers and sets up the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
+{
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
+
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
+}
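The count/scan split follows the generic shrinker contract from <linux/shrinker.h>: count_objects() reports how many objects could be freed, scan_objects() frees up to sc->nr_to_scan of them or returns SHRINK_STOP when it cannot make progress. A minimal, hypothetical module using the same shape (all names invented):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t demo_cached = ATOMIC_LONG_INIT(128);

static unsigned long demo_count(struct shrinker *shrinker,
				struct shrink_control *sc)
{
	/* How many objects could be freed right now (0 if none). */
	return atomic_long_read(&demo_cached);
}

static unsigned long demo_scan(struct shrinker *shrinker,
			       struct shrink_control *sc)
{
	long nr = min_t(long, sc->nr_to_scan,
			atomic_long_read(&demo_cached));

	if (nr <= 0)
		return SHRINK_STOP;	/* e.g. locks unavailable */

	atomic_long_sub(nr, &demo_cached);
	return nr;			/* objects actually freed */
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_count,
	.scan_objects = demo_scan,
	.seeks = DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");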
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9c6f93ec886b..f8da71682c96 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -231,7 +231,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.size = size / dev_priv->fbc.threshold;
+ dev_priv->fbc.uncompressed_size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -253,7 +253,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->fbc.size)
+ if (size <= dev_priv->fbc.uncompressed_size)
return 0;
/* Release any current block */
@@ -266,7 +266,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->fbc.size == 0)
+ if (dev_priv->fbc.uncompressed_size == 0)
return;
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
@@ -276,7 +276,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
kfree(dev_priv->fbc.compressed_llb);
}
- dev_priv->fbc.size = 0;
+ dev_priv->fbc.uncompressed_size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 48ddbf44c862..1d4e60df8883 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -386,6 +386,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ error->fault_data1, error->fault_data0);
+
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
@@ -555,7 +560,14 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore_obj);
+
+ for (i = 0; i < error->vm_count; i++)
+ kfree(error->active_bo[i]);
+
kfree(error->active_bo);
+ kfree(error->active_bo_count);
+ kfree(error->pinned_bo);
+ kfree(error->pinned_bo_count);
kfree(error->overlay);
kfree(error->display);
kfree(error);
@@ -994,12 +1006,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
- if (request->file_priv) {
+ if (request->pid) {
struct task_struct *task;
rcu_read_lock();
- task = pid_task(request->file_priv->file->pid,
- PIDTYPE_PID);
+ task = pid_task(request->pid, PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
@@ -1165,6 +1176,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_GEN7(dev))
error->err_int = I915_READ(GEN7_ERR_INT);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ }
+
if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ede5bbbd8a08..6d494432b19f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,6 +277,7 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
+ dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -330,12 +331,10 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
-
- dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
+
+ synchronize_irq(dev->irq);
}
/**
@@ -492,31 +491,6 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-/**
- * i915_pipe_enabled - check if a pipe is enabled
- * @dev: DRM device
- * @pipe: pipe to check
- *
- * Reading certain registers when the pipe is disabled can hang the chip.
- * Use this routine to make sure the PLL is running and the pipe is active
- * before reading such registers if unsure.
- */
-static int
-i915_pipe_enabled(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Locking is horribly broken here, but whatever. */
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return intel_crtc->active;
- } else {
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
- }
-}
-
/*
* This timing diagram depicts the video signal in and
* around the vertical blanking period.
@@ -582,34 +556,16 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ const struct drm_display_mode *mode =
+ &intel_crtc->config->base.adjusted_mode;
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %c\n", pipe_name(pipe));
- return 0;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- const struct drm_display_mode *mode =
- &intel_crtc->config->base.adjusted_mode;
-
- htotal = mode->crtc_htotal;
- hsync_start = mode->crtc_hsync_start;
- vbl_start = mode->crtc_vblank_start;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- } else {
- enum transcoder cpu_transcoder = (enum transcoder) pipe;
-
- htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
- hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
- vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
- if ((I915_READ(PIPECONF(cpu_transcoder)) &
- PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- }
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
/* Convert to pixel count */
vbl_start *= htotal;
@@ -648,12 +604,6 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %c\n", pipe_name(pipe));
- return 0;
- }
-
return I915_READ(reg);
}
@@ -840,7 +790,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return -EINVAL;
}
- if (!crtc->enabled) {
+ if (!crtc->state->enable) {
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
return -EBUSY;
}
@@ -1046,129 +996,73 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
}
-static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *rps_ei)
+static void vlv_c0_read(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *ei)
{
- u32 cz_ts, cz_freq_khz;
- u32 render_count, media_count;
- u32 elapsed_render, elapsed_media, elapsed_time;
- u32 residency = 0;
-
- cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
- cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
-
- render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
- media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
-
- if (rps_ei->cz_clock == 0) {
- rps_ei->cz_clock = cz_ts;
- rps_ei->render_c0 = render_count;
- rps_ei->media_c0 = media_count;
-
- return dev_priv->rps.cur_freq;
- }
-
- elapsed_time = cz_ts - rps_ei->cz_clock;
- rps_ei->cz_clock = cz_ts;
+ ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
+ ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+}
- elapsed_render = render_count - rps_ei->render_c0;
- rps_ei->render_c0 = render_count;
+static bool vlv_c0_above(struct drm_i915_private *dev_priv,
+ const struct intel_rps_ei *old,
+ const struct intel_rps_ei *now,
+ int threshold)
+{
+ u64 time, c0;
- elapsed_media = media_count - rps_ei->media_c0;
- rps_ei->media_c0 = media_count;
+ if (old->cz_clock == 0)
+ return false;
- /* Convert all the counters into common unit of milli sec */
- elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
- elapsed_render /= cz_freq_khz;
- elapsed_media /= cz_freq_khz;
+ time = now->cz_clock - old->cz_clock;
+ time *= threshold * dev_priv->mem_freq;
- /*
- * Calculate overall C0 residency percentage
- * only if elapsed time is non zero
+ /* Workload can be split between render + media, e.g. SwapBuffers
+ * being blitted in X after being rendered in mesa. To account for
+ * this we need to combine both engines into our activity counter.
*/
- if (elapsed_time) {
- residency =
- ((max(elapsed_render, elapsed_media) * 100)
- / elapsed_time);
- }
+ c0 = now->render_c0 - old->render_c0;
+ c0 += now->media_c0 - old->media_c0;
+ c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
- return residency;
+ return c0 >= time;
}
-/**
- * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
- * busy-ness calculated from C0 counters of render & media power wells
- * @dev_priv: DRM device private
- *
- */
-static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- u32 residency_C0_up = 0, residency_C0_down = 0;
- int new_delay, adj;
-
- dev_priv->rps.ei_interrupt_count++;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-
- if (dev_priv->rps.up_ei.cz_clock == 0) {
- vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
- vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
- return dev_priv->rps.cur_freq;
- }
+ vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
+ dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+}
+static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ struct intel_rps_ei now;
+ u32 events = 0;
- /*
- * To down throttle, C0 residency should be less than down threshold
- * for continous EI intervals. So calculate down EI counters
- * once in VLV_INT_COUNT_FOR_DOWN_EI
- */
- if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+ if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ return 0;
- dev_priv->rps.ei_interrupt_count = 0;
+ vlv_c0_read(dev_priv, &now);
+ if (now.cz_clock == 0)
+ return 0;
- residency_C0_down = vlv_c0_residency(dev_priv,
- &dev_priv->rps.down_ei);
- } else {
- residency_C0_up = vlv_c0_residency(dev_priv,
- &dev_priv->rps.up_ei);
+ if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
+ if (!vlv_c0_above(dev_priv,
+ &dev_priv->rps.down_ei, &now,
+ VLV_RP_DOWN_EI_THRESHOLD))
+ events |= GEN6_PM_RP_DOWN_THRESHOLD;
+ dev_priv->rps.down_ei = now;
}
- new_delay = dev_priv->rps.cur_freq;
-
- adj = dev_priv->rps.last_adj;
- /* C0 residency is greater than UP threshold. Increase Frequency */
- if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else
- adj = 1;
-
- if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
-
- /*
- * For better performance, jump directly
- * to RPe if we're below it.
- */
- if (new_delay < dev_priv->rps.efficient_freq)
- new_delay = dev_priv->rps.efficient_freq;
-
- } else if (!dev_priv->rps.ei_interrupt_count &&
- (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
- if (adj < 0)
- adj *= 2;
- else
- adj = -1;
- /*
- * This means, C0 residency is less than down threshold over
- * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
- */
- if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
+ if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ if (vlv_c0_above(dev_priv,
+ &dev_priv->rps.up_ei, &now,
+ VLV_RP_UP_EI_THRESHOLD))
+ events |= GEN6_PM_RP_UP_THRESHOLD;
+ dev_priv->rps.up_ei = now;
}
- return new_delay;
+ return events;
}
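Stripped of the hardware units, vlv_c0_above() compares combined render+media C0 residency over the evaluation interval against a percentage threshold (90 to upclock, 70 to downclock). A simplified, hypothetical model that folds the clock conversions away and treats the C0 counters and the CZ timestamp as one clock:

#include <stdbool.h>
#include <stdio.h>

struct ei_sample {
	unsigned long long cz_clock;
	unsigned long long c0_ticks;	/* render + media combined */
};

/* Fire when combined C0 residency across the window is at least
 * 'threshold' percent. */
static bool c0_above(const struct ei_sample *old, const struct ei_sample *now,
		     unsigned int threshold)
{
	unsigned long long dt = now->cz_clock - old->cz_clock;
	unsigned long long dc0 = now->c0_ticks - old->c0_ticks;

	return dt && 100 * dc0 >= (unsigned long long)threshold * dt;
}

int main(void)
{
	struct ei_sample old = { 0, 0 }, now = { 1000, 950 };

	printf("%d\n", c0_above(&old, &now, 90));	/* 1: 95% busy */
	return 0;
}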
static void gen6_pm_rps_work(struct work_struct *work)
@@ -1198,6 +1092,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+
adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
@@ -1220,8 +1116,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
@@ -1243,10 +1137,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, new_delay);
- else
- gen6_set_rps(dev_priv->dev, new_delay);
+ intel_set_rps(dev_priv->dev, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -1748,11 +1639,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- /* TODO: RPS on GEN9+ is not supported yet. */
- if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
- "GEN9+: unexpected RPS IRQ\n"))
- return;
-
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
@@ -2662,9 +2548,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
@@ -2684,9 +2567,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2699,9 +2579,6 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2715,9 +2592,6 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -2769,9 +2643,6 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -3236,15 +3107,24 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
spin_lock_irq(&dev_priv->irq_lock);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
- ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
- ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+ if (pipe_mask & 1 << PIPE_A)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
+ dev_priv->de_irq_mask[PIPE_A],
+ ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
+ if (pipe_mask & 1 << PIPE_B)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
+ dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
+ if (pipe_mask & 1 << PIPE_C)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
+ dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
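With the new pipe_mask argument, a caller restores interrupts only for the pipes whose power well just came back up, e.g. (hypothetical call site):

	/* Hypothetical: pipes B and C hang off the gated power well. */
	gen8_irq_power_well_post_enable(dev_priv,
					(1 << PIPE_B) | (1 << PIPE_C));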
@@ -3718,14 +3598,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
I915_WRITE16(IMR, dev_priv->irq_mask);
I915_WRITE16(IER,
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT);
POSTING_READ16(IER);
@@ -3887,14 +3765,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) {
@@ -4362,7 +4238,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4392,10 +4268,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- }
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 44f2262a5553..bb64415a1c3e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -27,7 +27,6 @@
struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
- .powersave = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
@@ -44,6 +43,7 @@ struct i915_params i915 __read_mostly = {
.enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0,
+ .load_detect_test = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
@@ -65,10 +65,6 @@ MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-module_param_named(powersave, i915.powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
- "Enable powersavings, fbc, downclocking, etc. (default: true)");
-
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
@@ -144,11 +140,16 @@ module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
-module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
+module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
+MODULE_PARM_DESC(load_detect_test,
+ "Force-enable the VGA load detect code for testing (default:false). "
+ "For developers only.");
+
module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
@@ -171,10 +172,10 @@ module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
-module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
MODULE_PARM_DESC(mmio_debug,
- "Enable the MMIO debug code (default: false). This may negatively "
- "affect performance.");
+ "Enable the MMIO debug code for the first N failures (default: off). "
+ "This may negatively affect performance.");
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 33b3d0a24071..b522eb6e59a4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,7 +139,21 @@
#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
+#define GEN8_R_PWR_CLK_STATE 0x20C8
+#define GEN8_RPCS_ENABLE (1 << 31)
+#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
+#define GEN8_RPCS_S_CNT_SHIFT 15
+#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11)
+#define GEN8_RPCS_SS_CNT_SHIFT 8
+#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define GEN8_RPCS_EU_MAX_SHIFT 4
+#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define GEN8_RPCS_EU_MIN_SHIFT 0
+#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
#define GAM_ECOCHK 0x4090
+#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
@@ -552,6 +566,9 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */
+#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */
+#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */
#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
@@ -586,6 +603,19 @@ enum punit_power_well {
PUNIT_POWER_WELL_NUM,
};
+enum skl_disp_power_wells {
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_DDI_A_E,
+ SKL_DISP_PW_DDI_B,
+ SKL_DISP_PW_DDI_C,
+ SKL_DISP_PW_DDI_D,
+ SKL_DISP_PW_1 = 14,
+ SKL_DISP_PW_2,
+};
+
+#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
+#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
+
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
@@ -614,6 +644,11 @@ enum punit_power_well {
#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
+#define PUNIT_REG_DDR_SETUP2 0x139
+#define FORCE_DDR_FREQ_REQ_ACK (1 << 8)
+#define FORCE_DDR_LOW_FREQ (1 << 1)
+#define FORCE_DDR_HIGH_FREQ (1 << 0)
+
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
@@ -638,7 +673,6 @@ enum punit_power_well {
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
-#define VLV_INT_COUNT_FOR_DOWN_EI 5
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1002,6 +1036,7 @@ enum punit_power_well {
#define DPIO_CHV_FIRST_MOD (0 << 8)
#define DPIO_CHV_SECOND_MOD (1 << 8)
#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
+#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
#define _CHV_PLL_DW6_CH0 0x8018
@@ -1011,6 +1046,19 @@ enum punit_power_well {
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+#define _CHV_PLL_DW8_CH0 0x8020
+#define _CHV_PLL_DW8_CH1 0x81A0
+#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
+#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
+#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
+
+#define _CHV_PLL_DW9_CH0 0x8024
+#define _CHV_PLL_DW9_CH1 0x81A4
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
+#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
+#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
+
#define _CHV_CMN_DW5_CH0 0x8114
#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
@@ -1258,6 +1306,9 @@ enum punit_power_well {
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
+#define GEN8_FAULT_TLB_DATA0 0x04b10
+#define GEN8_FAULT_TLB_DATA1 0x04b14
+
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -1314,6 +1365,8 @@ enum punit_power_well {
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
+#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2))
+#define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2))
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
@@ -1470,6 +1523,7 @@ enum punit_power_well {
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_LOCK_SHIFT 16
@@ -1482,6 +1536,8 @@ enum punit_power_well {
/* Fuse readout registers for GT */
#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_DISABLE_SS0 (1 << 10)
+#define CHV_FGT_DISABLE_SS1 (1 << 11)
#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
@@ -1491,6 +1547,17 @@ enum punit_power_well {
#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+#define GEN8_FUSE2 0x9120
+#define GEN8_F2_S_ENA_SHIFT 25
+#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
+
+#define GEN9_F2_SS_DIS_SHIFT 20
+#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0 0x9134
+#define GEN8_EU_DISABLE1 0x9138
+#define GEN8_EU_DISABLE2 0x913c
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -2048,6 +2115,14 @@ enum punit_power_well {
#define CDCLK_FREQ_SHIFT 4
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
/*
@@ -2376,6 +2451,12 @@ enum punit_power_well {
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
+#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+ INTERVAL_1_33_US(us) : \
+ INTERVAL_1_28_US(us))
+
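The two helpers divide microseconds by the GT timestamp period using shift-friendly approximations: (us * 100) >> 7 is us / 1.28 and (us * 3) >> 2 is us / 1.333, matching the 1.28 us tick on pre-gen9 parts and the 1.33 us tick on gen9. A quick standalone check (hypothetical):

#include <stdio.h>

#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)

int main(void)
{
	printf("%d\n", INTERVAL_1_28_US(100));	/* 78 ticks of 1.28 us */
	printf("%d\n", INTERVAL_1_33_US(100));	/* 75 ticks of 1.33 us */
	return 0;
}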
/*
* Logical Context regs
*/
@@ -2968,7 +3049,7 @@ enum punit_power_well {
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
-/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
@@ -3865,6 +3946,7 @@ enum punit_power_well {
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0<<5)
@@ -4013,7 +4095,7 @@ enum punit_power_well {
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
-#define DSPARB 0x70030
+#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
@@ -4021,6 +4103,9 @@ enum punit_power_well {
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
+#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
@@ -4044,8 +4129,8 @@ enum punit_power_well {
#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_CURSORA_MASK (0x3f<<8)
-#define DSPFW_PLANEC_SHIFT_OLD 0
-#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
@@ -4084,25 +4169,25 @@ enum punit_power_well {
#define DSPFW_SPRITED_WM1_SHIFT 24
#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
#define DSPFW_CURSORC_WM1_SHIFT 8
#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
#define DSPFW_CURSORC_SHIFT 0
@@ -4111,7 +4196,7 @@ enum punit_power_well {
/* vlv/chv high order bits */
#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (1<<24)
+#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
#define DSPFW_SPRITEF_HI_MASK (1<<23)
#define DSPFW_SPRITEE_HI_SHIFT 22
@@ -4132,7 +4217,7 @@ enum punit_power_well {
#define DSPFW_PLANEA_HI_MASK (1<<0)
#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (1<<24)
+#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
@@ -4153,21 +4238,17 @@ enum punit_power_well {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values*/
-#define DRAIN_LATENCY_PRECISION_16 16
-#define DRAIN_LATENCY_PRECISION_32 32
-#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_HIGH (1<<31)
-#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
-#define DDL_PLANE_PRECISION_HIGH (1<<7)
-#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1<<7)
+#define DDL_PRECISION_LOW (0<<7)
#define DRAIN_LATENCY_MASK 0x7f
+#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1<<31)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
@@ -5221,14 +5302,22 @@ enum punit_power_well {
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define FF_SLICE_CS_CHICKEN2 0x02e4
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
+
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
-#define HIZ_CHICKEN 0x7018
-# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+#define HIZ_CHICKEN 0x7018
+# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308
+#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -5245,11 +5334,16 @@ enum punit_power_well {
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+#define GEN8_L3SQCREG4 0xb118
+#define GEN8_LQSC_RO_PERF_DIS (1<<27)
+
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
-#define HDC_FORCE_NON_COHERENT (1<<4)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
+#define HDC_FORCE_NON_COHERENT (1<<4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5258,6 +5352,9 @@ enum punit_power_well {
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+#define BDW_SCRATCH1 0xb11c
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
+
/* PCH */
/* south display engine interrupt: IBX */
@@ -5980,6 +6077,7 @@ enum punit_power_well {
#define HSW_IDICR 0x9008
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT 0x120010
+#define EDRAM_ENABLED 0x1
#define GEN6_UCGCTL1 0x9400
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
@@ -6003,6 +6101,7 @@ enum punit_power_well {
#define GEN6_RSTCTL 0x9420
#define GEN8_UCGCTL6 0x9430
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_GFXPAUSE 0xA000
@@ -6010,6 +6109,7 @@ enum punit_power_well {
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
#define HSW_FREQUENCY(x) ((x)<<24)
+#define GEN9_FREQUENCY(x) ((x)<<23)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ 0xA00C
@@ -6028,8 +6128,10 @@ enum punit_power_well {
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
+#define GEN9_CAGF_SHIFT 23
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
+#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
@@ -6120,8 +6222,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
-#define VLV_RENDER_C0_COUNT_REG 0x138118
-#define VLV_MEDIA_C0_COUNT_REG 0x13811C
+#define VLV_RENDER_C0_COUNT 0x138118
+#define VLV_MEDIA_C0_COUNT 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@@ -6155,6 +6257,37 @@ enum punit_power_well {
#define GEN6_RC6 3
#define GEN6_RC7 4
+#define CHV_POWER_SS0_SIG1 0xa720
+#define CHV_POWER_SS1_SIG1 0xa728
+#define CHV_SS_PG_ENABLE (1<<1)
+#define CHV_EU08_PG_ENABLE (1<<9)
+#define CHV_EU19_PG_ENABLE (1<<17)
+#define CHV_EU210_PG_ENABLE (1<<25)
+
+#define CHV_POWER_SS0_SIG2 0xa724
+#define CHV_POWER_SS1_SIG2 0xa72c
+#define CHV_EU311_PG_ENABLE (1<<1)
+
+#define GEN9_SLICE0_PGCTL_ACK 0x804c
+#define GEN9_SLICE1_PGCTL_ACK 0x8050
+#define GEN9_SLICE2_PGCTL_ACK 0x8054
+#define GEN9_PGCTL_SLICE_ACK (1 << 0)
+
+#define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c
+#define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060
+#define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064
+#define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068
+#define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c
+#define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070
+#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
+#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
+#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
+#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
+#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
+#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
+#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
+#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
@@ -6185,6 +6318,7 @@ enum punit_power_well {
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
@@ -6200,8 +6334,12 @@ enum punit_power_well {
#define HALF_SLICE_CHICKEN3 0xe184
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+#define GEN9_HALF_SLICE_CHICKEN7 0xe194
+#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
+
/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
@@ -6351,6 +6489,13 @@ enum punit_power_well {
#define HSW_PWR_WELL_FORCE_ON (1<<19)
#define HSW_PWR_WELL_CTL6 0x45414
+/* SKL Fuse Status */
+#define SKL_FUSE_STATUS 0x42000
+#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
+#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
+#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
+#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+
/* Per-pipe DDI Function Control */
#define TRANS_DDI_FUNC_CTL_A 0x60400
#define TRANS_DDI_FUNC_CTL_B 0x61400
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 9f19ed38cdc3..cf67f82f7b7f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,166 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- return I915_READ8(data_port);
-}
-
-static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- return I915_READ8(VGA_AR_DATA_READ);
-}
-
-static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- I915_WRITE8(VGA_AR_DATA_WRITE, val);
-}
-
-static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- I915_WRITE8(data_port, val);
-}
-
-static void i915_save_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA state */
- dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
- dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
- dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
- dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
-
- /* VGA color palette registers */
- dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
-
- /* MSR bits */
- dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* CRT controller regs */
- i915_write_indexed(dev, cr_index, cr_data, 0x11,
- i915_read_indexed(dev, cr_index, cr_data, 0x11) &
- (~0x80));
- for (i = 0; i <= 0x24; i++)
- dev_priv->regfile.saveCR[i] =
- i915_read_indexed(dev, cr_index, cr_data, i);
- /* Make sure we don't turn off CR group 0 writes */
- dev_priv->regfile.saveCR[0x11] &= ~0x80;
-
- /* Attribute controller registers */
- I915_READ8(st01);
- dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
- for (i = 0; i <= 0x14; i++)
- dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
- I915_READ8(st01);
-
- /* Graphics controller registers */
- for (i = 0; i < 9; i++)
- dev_priv->regfile.saveGR[i] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
-
- dev_priv->regfile.saveGR[0x10] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->regfile.saveGR[0x11] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->regfile.saveGR[0x18] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
- /* Sequencer registers */
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveSR[i] =
- i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-static void i915_restore_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA state */
- I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
-
- I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
- I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
-
- /* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* Sequencer registers, don't write SR07 */
- for (i = 0; i < 7; i++)
- i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->regfile.saveSR[i]);
-
- /* CRT controller regs */
- /* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
- for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
-
- /* Graphics controller regs */
- for (i = 0; i < 9; i++)
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->regfile.saveGR[i]);
-
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->regfile.saveGR[0x10]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->regfile.saveGR[0x11]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->regfile.saveGR[0x18]);
-
- /* Attribute controller registers */
- I915_READ8(st01); /* switch back to index mode */
- for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
- I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
- I915_READ8(st01);
-
- /* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
-}
-
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -197,11 +37,6 @@ static void i915_save_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
- /* This is only meaningful in non-KMS mode */
- /* Don't regfile.save them in KMS mode */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_save_display_reg(dev);
-
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
@@ -224,9 +59,6 @@ static void i915_save_display(struct drm_device *dev)
/* save FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_save_vga(dev);
}
static void i915_restore_display(struct drm_device *dev)
@@ -238,11 +70,7 @@ static void i915_restore_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_restore_display_reg(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- mask = ~LVDS_PORT_EN;
+ mask = ~LVDS_PORT_EN;
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -270,10 +98,7 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_restore_vga(dev);
- else
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
@@ -285,24 +110,6 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveDEIER = I915_READ(DEIER);
- dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
- dev_priv->regfile.saveGTIER = I915_READ(GTIER);
- dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
- dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->regfile.saveIER = I915_READ(IER);
- dev_priv->regfile.saveIMR = I915_READ(IMR);
- }
- }
-
if (IS_GEN4(dev))
pci_read_config_word(dev->pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -341,24 +148,6 @@ int i915_restore_state(struct drm_device *dev)
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
- I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
- I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
- I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
- I915_WRITE(RSTDBYCTL,
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
- } else {
- I915_WRITE(IER, dev_priv->regfile.saveIER);
- I915_WRITE(IMR, dev_priv->regfile.saveIMR);
- }
- }
-
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 49f5ade0edb7..247626885f49 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -127,10 +127,19 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
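+/* Valleyview tracks media-well RC6 residency separately (VLV_GT_MEDIA_RC6). */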
+static ssize_t
+show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = dev_get_drvdata(kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
@@ -153,6 +162,16 @@ static struct attribute_group rc6p_attr_group = {
.name = power_group_name,
.attrs = rc6p_attrs
};
+
+static struct attribute *media_rc6_attrs[] = {
+ &dev_attr_media_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group media_rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = media_rc6_attrs
+};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -300,7 +319,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
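+ /* Gen9 widens the CAGF field to 9 bits and moves it up to bit 23. */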
+ if (IS_GEN9(dev_priv))
+ ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -402,10 +423,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
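+ /* intel_set_rps() now handles the vlv vs. gen6+ dispatch internally. */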
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -464,10 +482,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -493,38 +508,17 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, rp_state_cap;
- ssize_t ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ u32 val;
- if (attr == &dev_attr_gt_RP0_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0x0000ff) >> 0));
- } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0x00ff00) >> 8));
- } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0xff0000) >> 16));
- } else {
+ if (attr == &dev_attr_gt_RP0_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ else if (attr == &dev_attr_gt_RP1_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ else if (attr == &dev_attr_gt_RPn_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ else
BUG();
- }
+
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
@@ -633,6 +627,12 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("RC6p residency sysfs setup failed\n");
}
+ if (IS_VALLEYVIEW(dev)) {
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ &media_rc6_attr_group);
+ if (ret)
+ DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+ }
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index d776621c8521..5fda6c70b423 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -114,7 +114,7 @@ TRACE_EVENT(i915_vma_bind,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
- __field(u32, offset)
+ __field(u64, offset)
__field(u32, size)
__field(unsigned, flags)
),
@@ -127,7 +127,7 @@ TRACE_EVENT(i915_vma_bind,
__entry->flags = flags;
),
- TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
+ TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
@@ -140,7 +140,7 @@ TRACE_EVENT(i915_vma_unbind,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
- __field(u32, offset)
+ __field(u64, offset)
__field(u32, size)
),
@@ -151,10 +151,109 @@ TRACE_EVENT(i915_vma_unbind,
__entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+ TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
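+/* Tag an address space as (G)lobal GTT or (P)er-process GTT in trace output. */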
+#define VM_TO_TRACE_NAME(vm) \
+ (i915_is_ggtt(vm) ? "G" : \
+ "P")
+
+DECLARE_EVENT_CLASS(i915_va,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u64, start)
+ __field(u64, end)
+ __string(name, name)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->start = start;
+ __entry->end = start + length - 1;
+ __assign_str(name, name);
+ ),
+
+ TP_printk("vm=%p (%s), 0x%llx-0x%llx",
+ __entry->vm, __get_str(name), __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_va, i915_va_alloc,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name)
+);
+
+DECLARE_EVENT_CLASS(i915_page_table_entry,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u64, start)
+ __field(u64, end)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->start = start;
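+ /* end = the last address before the next pde_shift-aligned boundary above start */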
+ __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
+ ),
+
+ TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift)
+);
+
+/* Avoid extra math because we only support two sizes. The format is defined by
+ * bitmap_scnprintf. Each 32 bits is 8 hex digits followed by a comma */
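+/* (1024 bits -> 32 groups * 9 chars = 288; 512 bits -> 16 * 9 = 144; the +1
+ * leaves room for the terminating NUL.) */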
+#define TRACE_PT_SIZE(bits) \
+ ((((bits) == 1024) ? 288 : 144) + 1)
+
+DECLARE_EVENT_CLASS(i915_page_table_entry_update,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u32, first)
+ __field(u32, last)
+ __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->first = first;
+ __entry->last = first + count - 1;
+ scnprintf(__get_str(cur_ptes),
+ TRACE_PT_SIZE(bits),
+ "%*pb",
+ bits,
+ pt->used_ptes);
+ ),
+
+ TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
+ __entry->vm, __entry->pde, __entry->last, __entry->first,
+ __get_str(cur_ptes))
+);
+
+DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits)
+);
+
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
deleted file mode 100644
index d10fe3e9c49f..000000000000
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- * Copyright 2013 (c) Intel Corporation
- * Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "intel_drv.h"
-#include "i915_reg.h"
-
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpll_reg;
-
- /* On IVB, 3rd pipe shares PLL with another one */
- if (pipe > 1)
- return false;
-
- if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
- else
- dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
-void i915_save_display_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Cursor state */
- dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
- dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
- } else {
- dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
- dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
- dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
- dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
- dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
- dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
- dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
- }
-
- dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
- } else {
- dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
- dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
- dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
- dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
- dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
- dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
- dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
- }
-
- dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
- }
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
- else
- dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->regfile.saveDP_B = I915_READ(DP_B);
- dev_priv->regfile.saveDP_C = I915_READ(DP_C);
- dev_priv->regfile.saveDP_D = I915_READ(DP_D);
- dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
- dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
- dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
- dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
- dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
- dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
- dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
- dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
- }
- /* FIXME: regfile.save TV & SDVO state */
-
- /* Panel fitter */
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- }
-
- /* Backlight */
- if (INTEL_INFO(dev)->gen <= 4)
- pci_read_config_byte(dev->pdev, PCI_LBPC,
- &dev_priv->regfile.saveLBB);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- } else {
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
- dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- }
-
- return;
-}
-
-void i915_restore_display_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
- int i;
-
- /* Backlight */
- if (INTEL_INFO(dev)->gen <= 4)
- pci_write_config_byte(dev->pdev, PCI_LBPC,
- dev_priv->regfile.saveLBB);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
- * otherwise we get blank eDP screen after S3 on some machines
- */
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
- } else {
- if (INTEL_INFO(dev)->gen >= 4)
- I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
- }
-
- /* Panel fitter */
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
- }
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
- }
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
- break;
- }
-
-
- if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = _PCH_DPLL_A;
- dpll_b_reg = _PCH_DPLL_B;
- fpa0_reg = _PCH_FPA0;
- fpb0_reg = _PCH_FPB0;
- fpa1_reg = _PCH_FPA1;
- fpb1_reg = _PCH_FPB1;
- } else {
- dpll_a_reg = _DPLL_A;
- dpll_b_reg = _DPLL_B;
- fpa0_reg = _FPA0;
- fpb0_reg = _FPB0;
- fpa1_reg = _FPA1;
- fpb1_reg = _FPB1;
- }
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
- POSTING_READ(_DPLL_A_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
-
- I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
-
- I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
-
- I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
- I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
- I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
- I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
- I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
- I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
- I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
- }
-
- I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
- I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
- POSTING_READ(_DPLL_B_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
-
- I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
-
- I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
-
- I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
- I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
- I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
- I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
- I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
- I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
- I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
- }
-
- I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
- I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
- /* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
- I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
- I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
-
- return;
-}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
new file mode 100644
index 000000000000..5eee75bff170
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_drv.h"
+#include "i915_vgpu.h"
+
+/**
+ * DOC: Intel GVT-g guest support
+ *
+ * Intel GVT-g is a graphics virtualization technology which shares the
+ * GPU among multiple virtual machines on a time-sharing basis. Each
+ * virtual machine is presented with a virtual GPU (vGPU) that has the same
+ * features as the underlying physical GPU (pGPU), so the i915 driver can run
+ * seamlessly in a virtual machine. This file provides vGPU-specific
+ * optimizations when running in a virtual machine, to reduce the complexity
+ * of vGPU emulation and to improve the overall performance.
+ *
+ * A primary technique introduced here is the so-called "address space
+ * ballooning". Intel GVT-g partitions global graphics memory among multiple
+ * VMs, so each VM can directly access a portion of the memory without the
+ * hypervisor's intervention, e.g. for filling textures or queuing commands.
+ * However, with this partitioning an unmodified i915 driver would assume a
+ * smaller graphics memory space starting from address ZERO, which would then
+ * require the vGPU emulation module to translate graphics addresses between
+ * 'guest view' and 'host view' for every register and command opcode that
+ * contains a graphics memory address. To reduce this complexity, Intel GVT-g
+ * introduces "address space ballooning": each guest i915 driver is told the
+ * exact partitioning layout and reserves the portions it does not own,
+ * excluding them from allocation. For example, a guest assigned the range
+ * [256MB, 512MB) reserves [0, 256MB) and everything above 512MB, so its
+ * allocator only ever hands out addresses within its own slice. The vGPU
+ * emulation module then only needs to scan and validate graphics addresses,
+ * without the complexity of address translation.
+ *
+ */
+
+/**
+ * i915_check_vgpu - detect virtual GPU
+ * @dev: drm device *
+ *
+ * This function is called at the initialization stage to detect whether the
+ * driver is running on a vGPU.
+ */
+void i915_check_vgpu(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ uint64_t magic;
+ uint32_t version;
+
+ BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+
+ if (!IS_HASWELL(dev))
+ return;
+
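+ /* On bare metal the PVINFO page does not contain the magic cookie. */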
+ magic = readq(dev_priv->regs + vgtif_reg(magic));
+ if (magic != VGT_MAGIC)
+ return;
+
+ version = INTEL_VGT_IF_VERSION_ENCODE(
+ readw(dev_priv->regs + vgtif_reg(version_major)),
+ readw(dev_priv->regs + vgtif_reg(version_minor)));
+ if (version != INTEL_VGT_IF_VERSION) {
+ DRM_INFO("VGT interface version mismatch!\n");
+ return;
+ }
+
+ dev_priv->vgpu.active = true;
+ DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+}
+
+struct _balloon_info_ {
+ /*
+ * There are up to two ballooned regions in each of the mappable and
+ * unmappable graphics memory ranges: indices 0/1 are for mappable
+ * memory, 2/3 for unmappable memory.
+ */
+ struct drm_mm_node space[4];
+};
+
+static struct _balloon_info_ bl_info;
+
+/**
+ * intel_vgt_deballoon - deballoon reserved graphics address chunks
+ *
+ * This function is called to release the ballooned-out graphics memory when
+ * the driver is unloaded or when ballooning fails.
+ */
+void intel_vgt_deballoon(void)
+{
+ int i;
+
+ DRM_DEBUG("VGT deballoon.\n");
+
+ for (i = 0; i < 4; i++) {
+ if (bl_info.space[i].allocated)
+ drm_mm_remove_node(&bl_info.space[i]);
+ }
+
+ memset(&bl_info, 0, sizeof(bl_info));
+}
+
+static int vgt_balloon_space(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long start, unsigned long end)
+{
+ unsigned long size = end - start;
+
+ if (start == end)
+ return -EINVAL;
+
+ DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+ start, end, size / 1024);
+
+ node->start = start;
+ node->size = size;
+
+ return drm_mm_reserve_node(mm, node);
+}
+
+/**
+ * intel_vgt_balloon - balloon out reserved graphics address chunks
+ * @dev: drm device
+ *
+ * This function is called at the initialization stage to balloon out the
+ * graphics address space allocated to other vGPUs, by marking those ranges
+ * as reserved. The ballooning-related knowledge (starting address and size
+ * of the mappable/unmappable graphics memory) is described in the vgt_if
+ * structure, in a reserved MMIO range.
+ *
+ * To give an example, the drawing below depicts one typical scenario after
+ * ballooning. Here vGPU1 has two pieces of graphics address space ballooned
+ * out, one in each of the mappable and the non-mappable parts. From vGPU1's
+ * point of view, the total size is the same as the physical one, with the
+ * start address of its graphics space being zero. Yet some portions are
+ * ballooned out (the shaded parts, which the drm allocator marks as
+ * reserved). From the host's point of view, the graphics address space is
+ * partitioned among multiple vGPUs in different VMs.
+ *
+ * vGPU1 view Host view
+ * 0 ------> +-----------+ +-----------+
+ * ^ |///////////| | vGPU3 |
+ * | |///////////| +-----------+
+ * | |///////////| | vGPU2 |
+ * | +-----------+ +-----------+
+ * mappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |///////////| | |
+ * v |///////////| | Host |
+ * +=======+===========+ +===========+
+ * ^ |///////////| | vGPU3 |
+ * | |///////////| +-----------+
+ * | |///////////| | vGPU2 |
+ * | +-----------+ +-----------+
+ * unmappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |///////////| | |
+ * | |///////////| | Host |
+ * v |///////////| | |
+ * total GM size ------> +-----------+ +-----------+
+ *
+ * Returns:
+ * zero on success, non-zero if the configuration is invalid or ballooning fails
+ */
+int intel_vgt_balloon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+
+ unsigned long mappable_base, mappable_size, mappable_end;
+ unsigned long unmappable_base, unmappable_size, unmappable_end;
+ int ret;
+
+ mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
+ mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
+ unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
+ unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+
+ mappable_end = mappable_base + mappable_size;
+ unmappable_end = unmappable_base + unmappable_size;
+
+ DRM_INFO("VGT ballooning configuration:\n");
+ DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+ mappable_base, mappable_size / 1024);
+ DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+ unmappable_base, unmappable_size / 1024);
+
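+ /* The assigned ranges must fit inside the GGTT: mappable memory below the
+ * aperture end, unmappable memory above it. */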
+ if (mappable_base < ggtt_vm->start ||
+ mappable_end > dev_priv->gtt.mappable_end ||
+ unmappable_base < dev_priv->gtt.mappable_end ||
+ unmappable_end > ggtt_vm_end) {
+ DRM_ERROR("Invalid ballooning configuration!\n");
+ return -EINVAL;
+ }
+
+ /* Unmappable graphic memory ballooning */
+ if (unmappable_base > dev_priv->gtt.mappable_end) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[2],
+ dev_priv->gtt.mappable_end,
+ unmappable_base);
+
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * No need to partition out the last physical page,
+ * because it is reserved to the guard page.
+ */
+ if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[3],
+ unmappable_end,
+ ggtt_vm_end - PAGE_SIZE);
+ if (ret)
+ goto err;
+ }
+
+ /* Mappable graphic memory ballooning */
+ if (mappable_base > ggtt_vm->start) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[0],
+ ggtt_vm->start, mappable_base);
+
+ if (ret)
+ goto err;
+ }
+
+ if (mappable_end < dev_priv->gtt.mappable_end) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[1],
+ mappable_end,
+ dev_priv->gtt.mappable_end);
+
+ if (ret)
+ goto err;
+ }
+
+ DRM_INFO("VGT balloon successfully\n");
+ return 0;
+
+err:
+ DRM_ERROR("VGT balloon fail\n");
+ intel_vgt_deballoon();
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
new file mode 100644
index 000000000000..97a88b5f6a26
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _I915_VGPU_H_
+#define _I915_VGPU_H_
+
+/* The MMIO offset of the shared info between guest and host emulator */
+#define VGT_PVINFO_PAGE 0x78000
+#define VGT_PVINFO_SIZE 0x1000
+
+/*
+ * The following structure pages are defined in GEN MMIO space
+ * for virtualization. (One page for now)
+ */
+#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
+#define VGT_VERSION_MAJOR 1
+#define VGT_VERSION_MINOR 0
+
+#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
+#define INTEL_VGT_IF_VERSION \
+ INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
+
+struct vgt_if {
+ uint64_t magic; /* VGT_MAGIC */
+ uint16_t version_major;
+ uint16_t version_minor;
+ uint32_t vgt_id; /* ID of vGT instance */
+ uint32_t rsv1[12]; /* pad to offset 0x40 */
+ /*
+ * Data structure describing the ballooning info of resources.
+ * Each VM can only have one portion of continuous area for now.
+ * (May support scattered resource in future)
+ * (starting from offset 0x40)
+ */
+ struct {
+ /* Aperture register ballooning */
+ struct {
+ uint32_t base;
+ uint32_t size;
+ } mappable_gmadr; /* aperture */
+ /* GMADR register ballooning */
+ struct {
+ uint32_t base;
+ uint32_t size;
+ } nonmappable_gmadr; /* non aperture */
+ /* allowed fence registers */
+ uint32_t fence_num;
+ uint32_t rsv2[3];
+ } avail_rs; /* available/assigned resource */
+ uint32_t rsv3[0x200 - 24]; /* pad to half page */
+ /*
+ * The bottom half page is for responses from the Gfx driver to the
+ * hypervisor. These are reserved fields for now.
+ */
+ uint32_t rsv4;
+ uint32_t display_ready; /* ready for display owner switch */
+ uint32_t rsv5[0x200 - 2]; /* pad to one page */
+} __packed;
+
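+/* MMIO offset of a vgt_if field: the PVINFO page base plus the field's byte
+ * offset within struct vgt_if (an open-coded offsetof()). */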
+#define vgtif_reg(x) \
+ (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
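+/* Usage sketch (illustrative): the balloon code above reads, e.g.,
+ * I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)) to fetch the range
+ * assigned to this vGPU from the PVINFO page. */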
+
+/* vGPU display status to be used by the host side */
+#define VGT_DRV_DISPLAY_NOT_READY 0
+#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
+
+extern void i915_check_vgpu(struct drm_device *dev);
+extern int intel_vgt_balloon(struct drm_device *dev);
+extern void intel_vgt_deballoon(void);
+
+#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 19a9dd5408f3..3903b90fb64e 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -134,9 +134,9 @@ int intel_atomic_commit(struct drm_device *dev,
* FIXME: The proper sequence here will eventually be:
*
* drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_pre_planes(dev, state);
+ * drm_atomic_helper_commit_modeset_disables(dev, state);
* drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_post_planes(dev, state);
+ * drm_atomic_helper_commit_modeset_enables(dev, state);
* drm_atomic_helper_wait_for_vblanks(dev, state);
* drm_atomic_helper_cleanup_planes(dev, state);
* drm_atomic_state_free(state);
@@ -214,12 +214,18 @@ struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *crtc_state;
if (WARN_ON(!intel_crtc->config))
- return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL);
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ else
+ crtc_state = kmemdup(intel_crtc->config,
+ sizeof(*intel_crtc->config), GFP_KERNEL);
- return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config),
- GFP_KERNEL);
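+ /* Tie the duplicated state back to its crtc before handing it out. */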
+ if (crtc_state)
+ crtc_state->base.crtc = crtc;
+
+ return &crtc_state->base;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 9e6f727dfd19..976b89156570 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -203,16 +203,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t *val)
{
- struct drm_mode_config *config = &plane->dev->mode_config;
-
- if (property == config->rotation_property) {
- *val = state->rotation;
- } else {
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
- return -EINVAL;
- }
-
- return 0;
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
}
/**
@@ -233,14 +225,6 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
- struct drm_mode_config *config = &plane->dev->mode_config;
-
- if (property == config->rotation_property) {
- state->rotation = val;
- } else {
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
- return -EINVAL;
- }
-
- return 0;
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 3f178258d9f9..c684085cb56a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -662,6 +662,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp_link_params->vswing);
break;
}
+
+ if (bdb->version >= 173) {
+ uint8_t vswing;
+
+ vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+ dev_priv->vbt.edp_low_vswing = vswing == 0;
+ }
}
static void
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index a6a8710f665f..6afd5be33367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -554,6 +554,7 @@ struct bdb_edp {
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
+ u64 edp_vswing_preemph; /* v173 */
} __packed;
struct psr_table {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e66e17af0a56..515d7123785d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -690,7 +690,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -706,9 +706,11 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else
+ else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(connector, &tmp);
+ else
+ status = connector_status_unknown;
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
} else
status = connector_status_unknown;
@@ -794,6 +796,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_get_property = intel_connector_atomic_get_property,
};
@@ -848,7 +851,7 @@ void intel_crt_init(struct drm_device *dev)
if (!crt)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(crt);
return;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f14e8a2a022d..3eb0efc2dd0d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -139,18 +139,24 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00004014, 0x00000087 },
};
+/* eDP 1.4 low vswing translation parameters */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000a8 },
+ { 0x00002016, 0x000000ab },
+ { 0x00006012, 0x000000a2 },
+ { 0x00008010, 0x00000088 },
+ { 0x00000018, 0x000000ab },
+ { 0x00004014, 0x000000a2 },
+ { 0x00006012, 0x000000a6 },
+ { 0x00000018, 0x000000a2 },
+ { 0x00005013, 0x0000009c },
+ { 0x00000018, 0x00000088 },
+};
+
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
/* Idx NT mV T mV db */
- { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
- { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
- { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
- { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
- { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
- { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
- { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
- { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
- { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
- { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
+ { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */
};
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
@@ -187,7 +193,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- int i, n_hdmi_entries, hdmi_800mV_0dB;
+ int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+ size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
@@ -198,60 +205,85 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp;
- ddi_translations_edp = skl_ddi_translations_dp;
+ n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ if (dev_priv->vbt.edp_low_vswing) {
+ ddi_translations_edp = skl_ddi_translations_edp;
+ n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+ } else {
+ ddi_translations_edp = skl_ddi_translations_dp;
+ n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ }
+
+ /*
+ * On SKL, the recommendation from the hw team is to always use
+ * a certain type of level shifter (and thus the corresponding
+ * 800mV+2dB entry). Given that's the only validated entry, we
+ * override what is in the VBT, at least until further notice.
+ */
+ hdmi_level = 0;
ddi_translations_hdmi = skl_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 0;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+ n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 6;
+ hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
}
switch (port) {
case PORT_A:
ddi_translations = ddi_translations_edp;
+ size = n_edp_entries;
break;
case PORT_B:
case PORT_C:
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
break;
case PORT_D:
- if (intel_dp_is_edp(dev, PORT_D))
+ if (intel_dp_is_edp(dev, PORT_D)) {
ddi_translations = ddi_translations_edp;
- else
+ size = n_edp_entries;
+ } else {
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
+ }
break;
case PORT_E:
if (ddi_translations_fdi)
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
break;
default:
BUG();
}
- for (i = 0, reg = DDI_BUF_TRANS(port);
- i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
I915_WRITE(reg, ddi_translations[i].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
@@ -261,7 +293,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
/* Choose a good default if VBT is badly populated */
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_800mV_0dB;
+ hdmi_level = hdmi_default_entry;
/* Entry 9 is for HDMI: */
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
@@ -460,17 +492,23 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
}
static struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *intel_encoder, *ret = NULL;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_encoder *ret = NULL;
+ struct drm_atomic_state *state;
int num_encoders = 0;
+ int i;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc == crtc) {
- ret = intel_encoder;
- num_encoders++;
- }
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i] ||
+ state->connector_states[i]->crtc != crtc_state->base.crtc)
+ continue;
+
+ ret = to_intel_encoder(state->connector_states[i]->best_encoder);
+ num_encoders++;
}
WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
@@ -752,9 +790,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
case DPLL_CRTL1_LINK_RATE_810:
link_clock = 81000;
break;
+ case DPLL_CRTL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
case DPLL_CRTL1_LINK_RATE_1350:
link_clock = 135000;
break;
+ case DPLL_CRTL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
case DPLL_CRTL1_LINK_RATE_2700:
link_clock = 270000;
break;
@@ -1175,7 +1222,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
- intel_ddi_get_crtc_new_encoder(intel_crtc);
+ intel_ddi_get_crtc_new_encoder(crtc_state);
int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
@@ -2153,7 +2200,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
struct intel_connector *connector;
enum port port = intel_dig_port->port;
- connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ connector = intel_connector_alloc();
if (!connector)
return NULL;
@@ -2172,7 +2219,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
struct intel_connector *connector;
enum port port = intel_dig_port->port;
- connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ connector = intel_connector_alloc();
if (!connector)
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f75173c20f47..d547d9c8dda2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -83,7 +83,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
+ int x, int y, struct drm_framebuffer *old_fb,
+ struct drm_atomic_state *state);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -391,7 +392,7 @@ static const intel_limit_t intel_limits_chv = {
* them would make no difference.
*/
.dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4860000, .max = 6700000 },
+ .vco = { .min = 4800000, .max = 6480000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
.m2 = { .min = 24 << 22, .max = 175 << 22 },
@@ -430,25 +431,41 @@ bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
* intel_pipe_has_type() but looking at encoder->new_crtc instead of
* encoder->crtc.
*/
-static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
+ int type)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
+ int i, num_connectors = 0;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
- for_each_intel_encoder(dev, encoder)
- if (encoder->new_crtc == crtc && encoder->type == type)
+ num_connectors++;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ if (encoder->type == type)
return true;
+ }
+
+ WARN_ON(num_connectors == 0);
return false;
}
-static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
- int refclk)
+static const intel_limit_t *
+intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -466,20 +483,21 @@ static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
return limit;
}
-static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
+static const intel_limit_t *
+intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -487,17 +505,18 @@ static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
+static const intel_limit_t *
+intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc, refclk);
+ limit = intel_ironlake_limit(crtc_state, refclk);
else if (IS_G4X(dev)) {
- limit = intel_g4x_limit(crtc);
+ limit = intel_g4x_limit(crtc_state);
} else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
@@ -506,14 +525,14 @@ static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
@@ -600,15 +619,17 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -661,15 +682,17 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -720,10 +743,12 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
@@ -732,7 +757,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int err_most = (target >> 8) + (target >> 9);
found = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
@@ -776,11 +801,53 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
return found;
}
+/*
+ * Check if the calculated PLL configuration is more optimal compared to the
+ * best configuration and error found so far. Return true if so; the
+ * calculated error is reported in @error_ppm.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const intel_clock_t *calculated_clock,
+ const intel_clock_t *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(dev)) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (WARN_ON_ONCE(!target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
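+
+/*
+ * Worked example (illustrative): with target_freq = 270000 kHz and a
+ * calculated dot clock of 270027 kHz, the error is
+ * 1000000 * 27 / 270000 = 100 ppm. That is not < 100, so a bigger P
+ * value alone no longer wins; the configuration must instead beat
+ * best_error_ppm by more than 10 ppm.
+ */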
+
static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
@@ -800,7 +867,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm, diff;
+ unsigned int ppm;
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
@@ -811,20 +878,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
&clock))
continue;
- diff = abs(clock.dot - target);
- ppm = div_u64(1000000ULL * diff, target);
-
- if (ppm < 100 && clock.p > best_clock->p) {
- bestppm = 0;
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
- if (bestppm >= 10 && ppm < bestppm - 10) {
- bestppm = ppm;
- *best_clock = clock;
- found = true;
- }
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
}
}
}
@@ -834,16 +896,20 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
intel_clock_t clock;
uint64_t m2;
int found = false;
memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
/*
* Based on hardware doc, the n always set to 1, and m1 always
@@ -857,6 +923,7 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
for (clock.p2 = limit->p2.p2_fast;
clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
clock.p = clock.p1 * clock.p2;
@@ -873,12 +940,13 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
- /* based on hardware requirement, prefer bigger p
- */
- if (clock.p > best_clock->p) {
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
}
}
@@ -897,8 +965,12 @@ bool intel_crtc_active(struct drm_crtc *crtc)
*
* We can ditch the crtc->primary->fb check as soon as we can
* properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
*/
- return intel_crtc->active && crtc->primary->fb &&
+ return intel_crtc->active && crtc->primary->state->fb &&
intel_crtc->config->base.adjusted_mode.crtc_clock;
}
@@ -1301,14 +1373,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
u32 val;
if (INTEL_INFO(dev)->gen >= 9) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
I915_STATE_WARN(val & SP_ENABLE,
@@ -2190,30 +2262,109 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-int
-intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier)
+{
+ unsigned int tile_height;
+ uint32_t pixel_bytes;
+
+ switch (fb_format_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ tile_height = 1;
+ break;
+ case I915_FORMAT_MOD_X_TILED:
+ tile_height = IS_GEN2(dev) ? 16 : 8;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ tile_height = 32;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
+ switch (pixel_bytes) {
+ default:
+ case 1:
+ tile_height = 64;
+ break;
+ case 2:
+ case 4:
+ tile_height = 32;
+ break;
+ case 8:
+ tile_height = 16;
+ break;
+ case 16:
+ WARN_ONCE(1,
+ "128-bit pixels are not supported for display!");
+ tile_height = 16;
+ break;
+ }
+ break;
+ default:
+ MISSING_CASE(fb_format_modifier);
+ tile_height = 1;
+ break;
+ }
+
+ return tile_height;
+}
+
+unsigned int
+intel_fb_align_height(struct drm_device *dev, unsigned int height,
+ uint32_t pixel_format, uint64_t fb_format_modifier)
+{
+ return ALIGN(height, intel_tile_height(dev, pixel_format,
+ fb_format_modifier));
+}
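+
+/*
+ * Example (illustrative): a 1920x1080 XRGB8888 framebuffer with
+ * I915_FORMAT_MOD_Y_TILED has a tile height of 32 rows, so
+ * intel_fb_align_height() rounds 1080 up to ALIGN(1080, 32) = 1088.
+ */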
+
+static int
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
- int tile_height;
+ struct intel_rotation_info *info = &view->rotation_info;
- tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
- return ALIGN(height, tile_height);
+ *view = i915_ggtt_view_normal;
+
+ if (!plane_state)
+ return 0;
+
+ if (!intel_rotation_90_or_270(plane_state->rotation))
+ return 0;
+
+ *view = i915_ggtt_view_rotated;
+
+ info->height = fb->height;
+ info->pixel_format = fb->pixel_format;
+ info->pitch = fb->pitches[0];
+ info->fb_modifier = fb->modifier[0];
+
+ if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
+ info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
+ DRM_DEBUG_KMS(
+ "Y or Yf tiling is needed for 90/270 rotation!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
u32 alignment;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2223,7 +2374,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
else
alignment = 64 * 1024;
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else {
@@ -2231,13 +2382,22 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
alignment = 0;
}
break;
- case I915_TILING_Y:
- WARN(1, "Y tiled bo slipped through, driver bug!\n");
- return -EINVAL;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
+ "Y tiling bo slipped through, driver bug!\n"))
+ return -EINVAL;
+ alignment = 1 * 1024 * 1024;
+ break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
+ return -EINVAL;
}
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ if (ret)
+ return ret;
+
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
@@ -2256,7 +2416,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
intel_runtime_pm_get(dev_priv);
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
+ &view);
if (ret)
goto err_interruptible;
@@ -2276,19 +2437,27 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
return 0;
err_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return ret;
}
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
+ int ret;
+
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ WARN_ONCE(ret, "Couldn't get view from plane state!");
+
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
}
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2366,12 +2535,13 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
static bool
-intel_alloc_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
@@ -2390,25 +2560,24 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
obj->tiling_mode = plane_config->tiling;
if (obj->tiling_mode == I915_TILING_X)
- obj->stride = crtc->base.primary->fb->pitches[0];
+ obj->stride = fb->pitches[0];
- mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
- mode_cmd.width = crtc->base.primary->fb->width;
- mode_cmd.height = crtc->base.primary->fb->height;
- mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
+ mode_cmd.pixel_format = fb->pixel_format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier[0];
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
mutex_lock(&dev->struct_mutex);
-
- if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
+ if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
-
- obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+ DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
return true;
out_unref_obj:
@@ -2421,35 +2590,37 @@ out_unref_obj:
static void
update_state_fb(struct drm_plane *plane)
{
- if (plane->fb != plane->state->fb)
- drm_atomic_set_fb_for_plane(plane->state, plane->fb);
+ if (plane->fb == plane->state->fb)
+ return;
+
+ if (plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+ plane->state->fb = plane->fb;
+ if (plane->state->fb)
+ drm_framebuffer_reference(plane->state->fb);
}
static void
-intel_find_plane_obj(struct intel_crtc *intel_crtc,
- struct intel_initial_plane_config *plane_config)
+intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
+ struct drm_plane *primary = intel_crtc->base.primary;
+ struct drm_framebuffer *fb;
- if (!intel_crtc->base.primary->fb)
+ if (!plane_config->fb)
return;
- if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- update_state_fb(primary);
-
- return;
+ if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ goto valid_fb;
}
- kfree(intel_crtc->base.primary->fb);
- intel_crtc->base.primary->fb = NULL;
+ kfree(plane_config->fb);
/*
* Failed to alloc the obj, check to see if we should share
@@ -2464,26 +2635,29 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (!i->active)
continue;
- obj = intel_fb_obj(c->primary->fb);
- if (obj == NULL)
+ fb = c->primary->fb;
+ if (!fb)
continue;
+ obj = intel_fb_obj(fb);
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- if (obj->tiling_mode != I915_TILING_NONE)
- dev_priv->preserve_bios_swizzle = true;
-
- drm_framebuffer_reference(c->primary->fb);
- primary->fb = c->primary->fb;
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
- break;
+ drm_framebuffer_reference(fb);
+ goto valid_fb;
}
}
- update_state_fb(intel_crtc->base.primary);
+ return;
+
+valid_fb:
+ obj = intel_fb_obj(fb);
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dev_priv->preserve_bios_swizzle = true;
+
+ primary->fb = fb;
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
+ update_state_fb(primary);
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
}
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2604,9 +2778,6 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane),
@@ -2708,9 +2879,6 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -2723,6 +2891,51 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format)
+{
+ u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+
+ /*
+	 * The stride is expressed either in 64-byte chunks for linear
+	 * buffers or as a number of tiles for tiled buffers.
+ */
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ return 64;
+ case I915_FORMAT_MOD_X_TILED:
+ if (INTEL_INFO(dev)->gen == 2)
+ return 128;
+ return 512;
+ case I915_FORMAT_MOD_Y_TILED:
+		/* No need to check for Y tiling on old gens here, since
+		 * this is about the display engine and such combinations
+		 * are rejected before we get here.
+ */
+ return 128;
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (bits_per_pixel == 8)
+ return 64;
+ else
+ return 128;
+ default:
+ MISSING_CASE(fb_modifier);
+ return 64;
+ }
+}
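+
+/*
+ * Example (illustrative): on SKL an X-tiled framebuffer with a byte
+ * pitch of 7680 is programmed as PLANE_STRIDE = 7680 / 512 = 15 tiles;
+ * see the fb->pitches[0] / stride_div write in
+ * skylake_update_primary_plane() below.
+ */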
+
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj)
+{
+ const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
+
+ if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
+ view = &i915_ggtt_view_rotated;
+
+ return i915_gem_obj_ggtt_offset_view(obj, view);
+}
+
static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
@@ -2730,10 +2943,10 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int pipe = intel_crtc->pipe;
- u32 plane_ctl, stride;
+ u32 plane_ctl, stride_div;
+ unsigned long surf_addr;
if (!intel_crtc->primary_enabled) {
I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2777,43 +2990,39 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
BUG();
}
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- /*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X;
- stride = fb->pitches[0] >> 9;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ plane_ctl |= PLANE_CTL_TILED_Y;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ plane_ctl |= PLANE_CTL_TILED_YF;
break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
}
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
- I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-
- DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
- i915_gem_obj_ggtt_offset(obj),
- x, y, fb->width, fb->height,
- fb->pitches[0]);
+ obj = intel_fb_obj(fb);
+ stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
+ surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
+ I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
(intel_crtc->config->pipe_src_h - 1) << 16 |
(intel_crtc->config->pipe_src_w - 1));
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
- I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
+ I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -3064,38 +3273,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
-{
- return crtc->base.enabled && crtc->active &&
- crtc->config->has_pch_encoder;
-}
-
-static void ivb_modeset_global_resources(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
- struct intel_crtc *pipe_C_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
- uint32_t temp;
-
- /*
- * When everything is off disable fdi C so that we could enable fdi B
- * with all lanes. Note that we don't care about enabled pipes without
- * an enabled pch encoder.
- */
- if (!pipe_has_enabled_pch(pipe_B_crtc) &&
- !pipe_has_enabled_pch(pipe_C_crtc)) {
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
- temp = I915_READ(SOUTH_CHICKEN1);
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("disabling fdi C rx\n");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- }
-}
-
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -3751,20 +3928,23 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
- if (temp & FDI_BC_BIFURCATION_SELECT)
+ if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
- temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("enabling fdi C rx\n");
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ if (enable)
+ temp |= FDI_BC_BIFURCATION_SELECT;
+
+ DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
@@ -3772,20 +3952,19 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
switch (intel_crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (intel_crtc->config->fdi_lanes > 2)
- WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ cpt_set_fdi_bc_bifurcation(dev, false);
else
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
case PIPE_C:
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
default:
@@ -4120,6 +4299,24 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc)
}
}
+/*
+ * Disable a plane internally without actually modifying the plane's state.
+ * This will allow us to easily restore the plane later by just reprogramming
+ * its state.
+ */
+static void disable_plane_internal(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_plane_state *state =
+ plane->funcs->atomic_duplicate_state(plane);
+ struct intel_plane_state *intel_state = to_intel_plane_state(state);
+
+ intel_state->visible = false;
+ intel_plane->commit_plane(plane, intel_state);
+
+ intel_plane_destroy_state(plane, state);
+}
+
static void intel_disable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4129,8 +4326,8 @@ static void intel_disable_sprite_planes(struct drm_crtc *crtc)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
- plane->funcs->disable_plane(plane);
+ if (plane->fb && intel_plane->pipe == pipe)
+ disable_plane_internal(plane);
}
}
@@ -4204,7 +4401,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
bool reenable_ips = false;
/* The clocks have to be on to load the palette. */
- if (!crtc->enabled || !intel_crtc->active)
+ if (!crtc->state->enable || !intel_crtc->active)
return;
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -4288,11 +4485,10 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- if (dev_priv->fbc.plane == plane)
+ if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
hsw_disable_ips(intel_crtc);
@@ -4318,7 +4514,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -4327,7 +4523,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_prepare_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4426,7 +4622,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -4435,7 +4631,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_enable_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4760,8 +4956,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
-static void modeset_update_crtc_power_domains(struct drm_device *dev)
+static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
@@ -4773,7 +4970,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
- if (!crtc->base.enabled)
+ if (!crtc->base.state->enable)
continue;
pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
@@ -4783,7 +4980,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
}
if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
+ dev_priv->display.modeset_global_resources(state);
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
@@ -4900,24 +5097,23 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
switch (cdclk) {
- case 400000:
- cmd = 3;
- break;
case 333333:
case 320000:
- cmd = 2;
- break;
case 266667:
- cmd = 1;
- break;
case 200000:
- cmd = 0;
break;
default:
MISSING_CASE(cdclk);
return;
}
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
@@ -4937,27 +5133,25 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
-
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev_priv->dev))
- return 400000;
+ int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320/333MHz (depends on HPLL freq)
- * 400MHz
- * So we check to see whether we're above 90% of the lower bin and
- * adjust if needed.
+ * 400MHz (VLV only)
+ * So we check to see whether we're above 90% (VLV) or 95% (CHV)
+ * of the lower bin and adjust if needed.
*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (max_pixclk > freq_320*9/10)
+ if (!IS_CHERRYVIEW(dev_priv) &&
+ max_pixclk > freq_320*limit/100)
return 400000;
- else if (max_pixclk > 266667*9/10)
+ else if (max_pixclk > 266667*limit/100)
return freq_320;
else if (max_pixclk > 0)
return 266667;
@@ -4994,12 +5188,49 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
/* disable/enable all currently active pipes while we change cdclk */
for_each_intel_crtc(dev, intel_crtc)
- if (intel_crtc->base.enabled)
+ if (intel_crtc->base.state->enable)
*prepare_pipes |= (1 << intel_crtc->pipe);
}
-static void valleyview_modeset_global_resources(struct drm_device *dev)
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+ unsigned int credits, default_credits;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ default_credits = PFI_CREDIT(12);
+ else
+ default_credits = PFI_CREDIT(8);
+
+ if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
+ /* CHV suggested value is 31 or 63 */
+ if (IS_CHERRYVIEW(dev_priv))
+ credits = PFI_CREDIT_31;
+ else
+ credits = PFI_CREDIT(15);
+ } else {
+ credits = default_credits;
+ }
+
+ /*
+ * WA - write default credits before re-programming
+ * FIXME: should we also set the resend bit here?
+ */
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ default_credits);
+
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ credits | PFI_CREDIT_RESEND);
+
+ /*
+ * FIXME is this guaranteed to clear
+ * immediately or should we poll for it?
+ */
+ WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
+static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5021,6 +5252,8 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
else
valleyview_set_cdclk(dev, req_cdclk);
+ vlv_program_pfi_credits(dev_priv);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
}
@@ -5034,7 +5267,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
bool is_dsi;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -5049,7 +5282,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
}
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -5117,7 +5350,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -5125,7 +5358,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -5316,7 +5549,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
/* crtc should still be enabled when we disable it. */
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
@@ -5394,7 +5627,8 @@ static void intel_connector_check_state(struct intel_connector *connector)
crtc = encoder->base.crtc;
- I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+ I915_STATE_WARN(!crtc->state->enable,
+ "crtc not enabled\n");
I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
@@ -5402,6 +5636,34 @@ static void intel_connector_check_state(struct intel_connector *connector)
}
}
+int intel_connector_init(struct intel_connector *connector)
+{
+ struct drm_connector_state *connector_state;
+
+	connector_state = kzalloc(sizeof(*connector_state), GFP_KERNEL);
+ if (!connector_state)
+ return -ENOMEM;
+
+ connector->base.state = connector_state;
+ return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+ struct intel_connector *connector;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ if (intel_connector_init(connector) < 0) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
/* Even simpler default implementation, if there's really no special case to
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
@@ -5433,13 +5695,21 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
return encoder->get_hw_state(encoder, &pipe);
}
+static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+
+ if (crtc->base.state->enable &&
+ crtc->config->has_pch_encoder)
+ return crtc->config->fdi_lanes;
+
+ return 0;
+}
+
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
-
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
@@ -5466,22 +5736,20 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
case PIPE_A:
return true;
case PIPE_B:
- if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
- pipe_config->fdi_lanes > 2) {
+ if (pipe_config->fdi_lanes > 2 &&
+ pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
return true;
case PIPE_C:
- if (!pipe_has_enabled_pch(pipe_B_crtc) ||
- pipe_B_crtc->config->fdi_lanes <= 2) {
- if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return false;
- }
- } else {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
@@ -5581,7 +5849,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -5615,10 +5883,6 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
u32 val;
int divider;
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev))
- return 400000;
-
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
@@ -5764,15 +6028,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+ int num_connectors)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
+ WARN_ON(!crtc_state->base.state);
+
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5815,8 +6082,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
- reduced_clock && i915.powersave) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
@@ -5884,7 +6151,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
@@ -5900,13 +6167,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
}
}
-void intel_dp_set_m_n(struct intel_crtc *crtc)
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
+ struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+
+ if (m_n == M1_N1) {
+ dp_m_n = &crtc->config->dp_m_n;
+ dp_m2_n2 = &crtc->config->dp_m2_n2;
+ } else if (m_n == M2_N2) {
+ /*
+ * M2_N2 registers are not supported. Hence m2_n2 divider value
+ * needs to be programmed into M1_N1.
+ */
+ dp_m_n = &crtc->config->dp_m2_n2;
+ } else {
+ DRM_ERROR("Unsupported divider value\n");
+ return;
+ }
+
if (crtc->config->has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
- &crtc->config->dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
static void vlv_update_pll(struct intel_crtc *crtc,
@@ -6044,9 +6327,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 loopfilter, intcoeff;
+ u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
- int refclk;
+ u32 dpio_val;
+ int vco;
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
@@ -6054,6 +6338,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
bestm2 = pipe_config->dpll.m2 >> 22;
bestp1 = pipe_config->dpll.p1;
bestp2 = pipe_config->dpll.p2;
+ vco = pipe_config->dpll.vco;
+ dpio_val = 0;
+ loopfilter = 0;
/*
* Enable Refclk and SSC
@@ -6079,26 +6366,56 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ if (bestm2_frac)
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
- DPIO_CHV_FRAC_DIV_EN |
- (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+ if (bestm2_frac)
+ dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+ /* Program digital lock detect threshold */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+ if (!bestm2_frac)
+ dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
- refclk = i9xx_get_refclk(crtc, 0);
- loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
- 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
- if (refclk == 100000)
- intcoeff = 11;
- else if (refclk == 38400)
- intcoeff = 10;
- else
- intcoeff = 9;
- loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
+ if (vco == 5400000) {
+ loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6200000) {
+ loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6480000) {
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x8;
+ } else {
+ /* Not supported. Apply the same limits as in the max case */
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0;
+ }
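+ /*
+ * Restated for reference (editor's note, values from the branches
+ * above, vco in kHz): 5400000 exactly -> prop 0x3, int 0x8, gain 0x1,
+ * tribuf 0x9; <= 6200000 -> 0x5, 0xB, 0x3, 0x9; <= 6480000 -> 0x4,
+ * 0x9, 0x3, 0x8; anything faster is unsupported and keeps the
+ * max-case coefficients with tribuf_calcntr 0.
+ */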
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
/* AFC Recal */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
@@ -6123,6 +6440,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc_state pipe_config = {
+ .base.crtc = &crtc->base,
.pixel_multiplier = 1,
.dpll = *dpll,
};
@@ -6167,12 +6485,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
- is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -6215,7 +6533,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6245,7 +6563,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -6256,10 +6574,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6473,11 +6791,20 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != &crtc->base)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -6496,7 +6823,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
if (!crtc_state->clock_set) {
- refclk = i9xx_get_refclk(crtc, num_connectors);
+ refclk = i9xx_get_refclk(crtc_state, num_connectors);
/*
* Returns a set of divisors for the desired target clock with
@@ -6504,8 +6831,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
* 2) / p1 / p2.
*/
- limit = intel_limit(crtc, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
@@ -6521,7 +6848,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
* we will disable the LVDS downclock feature.
*/
has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, &clock,
&reduced_clock);
@@ -6620,7 +6947,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -6636,9 +6963,12 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4)
- if (val & DISPPLANE_TILED)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ }
+ }
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -6664,7 +6994,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -6673,7 +7004,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7147,18 +7478,26 @@ void intel_init_pch_refclk(struct drm_device *dev)
lpt_init_pch_refclk(dev);
}
-static int ironlake_get_refclk(struct drm_crtc *crtc)
+static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- int num_connectors = 0;
+ int num_connectors = 0, i;
bool is_lvds = false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -7345,22 +7684,21 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
- is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
+ is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
- refclk = ironlake_get_refclk(crtc);
+ refclk = ironlake_get_refclk(crtc_state);
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(intel_crtc, refclk);
- ret = dev_priv->display.find_dpll(limit, intel_crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ret = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
@@ -7374,7 +7712,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* downclock feature.
*/
*has_reduced_clock =
- dev_priv->display.find_dpll(limit, intel_crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
@@ -7407,16 +7745,24 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ struct intel_encoder *encoder;
uint32_t dpll;
- int factor, num_connectors = 0;
+ int factor, num_connectors = 0, i;
bool is_lvds = false, is_sdvo = false;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
- switch (intel_encoder->type) {
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -7545,7 +7891,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
}
}
- if (is_lvds && has_reduced_clock && i915.powersave)
+ if (is_lvds && has_reduced_clock)
crtc->lowfreq_avail = true;
else
crtc->lowfreq_avail = false;
@@ -7651,10 +7997,10 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, base, offset, stride_mult;
+ u32 val, base, offset, stride_mult, tiling;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7670,9 +8016,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
if (!(val & PLANE_CTL_ENABLE))
goto error;
- if (val & PLANE_CTL_TILED_MASK)
- plane_config->tiling = I915_TILING_X;
-
pixel_format = val & PLANE_CTL_FORMAT_MASK;
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
@@ -7680,6 +8023,26 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ tiling = val & PLANE_CTL_TILED_MASK;
+ switch (tiling) {
+ case PLANE_CTL_TILED_LINEAR:
+ fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+ break;
+ case PLANE_CTL_TILED_X:
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF:
+ fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+ break;
+ default:
+ MISSING_CASE(tiling);
+ goto error;
+ }
+
base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
plane_config->base = base;
@@ -7690,21 +8053,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- stride_mult = 64;
- break;
- case I915_TILING_X:
- stride_mult = 512;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto error;
- }
+ stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -7713,7 +8068,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
return;
error:
@@ -7753,7 +8108,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7769,9 +8124,12 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4)
- if (val & DISPPLANE_TILED)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ }
+ }
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -7797,7 +8155,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -7806,7 +8165,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@@ -8292,8 +8651,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
uint32_t cntl = 0, size = 0;
if (base) {
- unsigned int width = intel_crtc->cursor_width;
- unsigned int height = intel_crtc->cursor_height;
+ unsigned int width = intel_crtc->base.cursor->state->crtc_w;
+ unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
switch (stride) {
@@ -8357,7 +8716,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl = 0;
if (base) {
cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->cursor_width) {
+ switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
@@ -8368,7 +8727,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(intel_crtc->cursor_width);
+ MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
@@ -8415,7 +8774,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
base = 0;
if (x < 0) {
- if (x + intel_crtc->cursor_width <= 0)
+ if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -8424,7 +8783,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= x << CURSOR_X_SHIFT;
if (y < 0) {
- if (y + intel_crtc->cursor_height <= 0)
+ if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -8440,8 +8799,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
- base += (intel_crtc->cursor_height *
- intel_crtc->cursor_width - 1) * 4;
+ base += (intel_crtc->base.cursor->state->crtc_h *
+ intel_crtc->base.cursor->state->crtc_w - 1) * 4;
}
if (IS_845G(dev) || IS_I865G(dev))
@@ -8633,6 +8992,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state = NULL;
+ struct drm_connector_state *connector_state;
int ret, i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -8680,7 +9041,7 @@ retry:
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (possible_crtc->enabled)
+ if (possible_crtc->state->enable)
continue;
/* This can occur when applying the pipe A quirk on resume. */
if (to_intel_crtc(possible_crtc)->new_enabled)
@@ -8714,6 +9075,21 @@ retry:
old->load_detect_temp = true;
old->release_fb = NULL;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return false;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &intel_encoder->base;
+
if (!mode)
mode = &load_detect_mode;
@@ -8736,7 +9112,7 @@ retry:
goto fail;
}
- if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+ if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -8749,12 +9125,17 @@ retry:
return true;
fail:
- intel_crtc->new_enabled = crtc->enabled;
+ intel_crtc->new_enabled = crtc->state->enable;
if (intel_crtc->new_enabled)
intel_crtc->new_config = intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
+ if (state) {
+ drm_atomic_state_free(state);
+ state = NULL;
+ }
+
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
@@ -8764,24 +9145,44 @@ fail_unlock:
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old)
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_atomic_state *state;
+ struct drm_connector_state *connector_state;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
if (old->load_detect_temp) {
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ goto fail;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ goto fail;
+
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
intel_crtc->new_config = NULL;
- intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+ connector_state->best_encoder = NULL;
+ connector_state->crtc = NULL;
+
+ intel_set_mode(crtc, NULL, 0, 0, NULL, state);
+
+ drm_atomic_state_free(state);
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
@@ -8794,6 +9195,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
+
+ return;
+fail:
+ DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
+ drm_atomic_state_free(state);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -9032,6 +9438,8 @@ void intel_mark_busy(struct drm_device *dev)
intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
+ if (INTEL_INFO(dev)->gen >= 6)
+ gen6_rps_busy(dev_priv);
dev_priv->mm.busy = true;
}
@@ -9045,9 +9453,6 @@ void intel_mark_idle(struct drm_device *dev)
dev_priv->mm.busy = false;
- if (!i915.powersave)
- goto out;
-
for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -9058,7 +9463,6 @@ void intel_mark_idle(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
-out:
intel_runtime_pm_put(dev_priv);
}
@@ -9100,9 +9504,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb_obj);
+ intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
drm_gem_object_unreference(&work->pending_flip_obj->base);
- drm_gem_object_unreference(&work->old_fb_obj->base);
intel_fbc_update(dev);
@@ -9111,6 +9514,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ drm_framebuffer_unreference(work->old_fb);
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9627,69 +10031,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
return 0;
}
-static int intel_gen9_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
- uint32_t flags)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t plane = 0, stride;
- int ret;
-
- switch(intel_crtc->pipe) {
- case PIPE_A:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
- break;
- case PIPE_B:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
- break;
- case PIPE_C:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
- break;
- default:
- WARN_ONCE(1, "unknown plane in flip command\n");
- return -ENODEV;
- }
-
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
- break;
- case I915_TILING_X:
- stride = fb->pitches[0] >> 9;
- break;
- default:
- WARN_ONCE(1, "unknown tiling in flip command\n");
- return -ENODEV;
- }
-
- ret = intel_ring_begin(ring, 10);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit(ring, DERRMR);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
- intel_ring_emit(ring, 0);
-
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
- intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-
- intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
-
- return 0;
-}
-
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -9719,10 +10060,10 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
!i915_gem_request_completed(work->flip_queued_req, true))
return false;
- work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+ work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
}
- if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
+ if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
return false;
/* Potential stall - if we see that the flip has happened,
@@ -9753,7 +10094,8 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
spin_lock(&dev->event_lock);
if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
- intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+ intel_crtc->unpin_work->flip_queued_vblank,
+ drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
}
spin_unlock(&dev->event_lock);
@@ -9805,7 +10147,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- work->old_fb_obj = intel_fb_obj(old_fb);
+ work->old_fb = old_fb;
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
@@ -9836,12 +10178,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto cleanup;
-
/* Reference the objects for the scheduled work. */
- drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_framebuffer_reference(work->old_fb);
drm_gem_object_reference(&obj->base);
crtc->primary->fb = fb;
@@ -9849,6 +10187,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->pending_flip_obj = obj;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
+
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -9857,7 +10199,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
- if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+ if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
@@ -9870,12 +10212,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ring = &dev_priv->ring[RCS];
}
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+ crtc->primary->state, ring);
if (ret)
goto cleanup_pending;
- work->gtt_offset =
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+ work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
+ + intel_crtc->dspaddr_offset;
if (use_mmio_flip(ring, obj)) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@@ -9895,10 +10238,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_ring_get_request(ring));
}
- work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+ work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
- i915_gem_track_fb(work->old_fb_obj, obj,
+ i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
intel_fbc_disable(dev);
@@ -9910,16 +10253,17 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_unpin:
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+ mutex_unlock(&dev->struct_mutex);
+cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- drm_gem_object_unreference(&work->old_fb_obj->base);
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&dev->struct_mutex);
-cleanup:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ drm_framebuffer_unreference(work->old_fb);
+
spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
spin_unlock_irq(&dev->event_lock);
@@ -9959,8 +10303,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(connector->base.encoder);
}
@@ -9971,7 +10314,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = crtc->base.enabled;
+ crtc->new_enabled = crtc->base.state->enable;
if (crtc->new_enabled)
crtc->new_config = crtc->config;
@@ -9980,6 +10323,27 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
}
}
+/* Transitional helper to copy current connector/encoder state to
+ * connector->state. This is needed so that code that is partially
+ * converted to atomic does the right thing.
+ */
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->base.encoder) {
+ connector->base.state->best_encoder =
+ connector->base.encoder;
+ connector->base.state->crtc =
+ connector->base.encoder->crtc;
+ } else {
+ connector->base.state->best_encoder = NULL;
+ connector->base.state->crtc = NULL;
+ }
+ }
+}
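A minimal consumer-side sketch (editor's illustration; the helper below is
hypothetical): once the legacy pointers are mirrored, partially converted code
can key off connector->state instead of connector->base.encoder.
static bool example_connector_drives_crtc(struct intel_connector *connector,
 struct drm_crtc *crtc)
{
 /* Only valid after intel_modeset_update_connector_atomic_state() */
 return connector->base.state->crtc == crtc;
}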
+
/**
* intel_modeset_commit_output_state
*
@@ -9991,8 +10355,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->base.encoder = &connector->new_encoder->base;
}
@@ -10001,8 +10364,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ crtc->base.state->enable = crtc->new_enabled;
crtc->base.enabled = crtc->new_enabled;
}
+
+ intel_modeset_update_connector_atomic_state(dev);
}
static void
@@ -10037,8 +10403,9 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state;
struct intel_connector *connector;
- int bpp;
+ int bpp, i;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
@@ -10078,11 +10445,15 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
pipe_config->pipe_bpp = bpp;
+ state = pipe_config->base.state;
+
/* Clamp display bpp to EDID value */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
- if (!connector->new_encoder ||
- connector->new_encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector = to_intel_connector(state->connectors[i]);
+ if (state->connector_states[i]->crtc != &crtc->base)
continue;
connected_sink_compute_bpp(connector, pipe_config);
@@ -10207,8 +10578,7 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
struct intel_encoder *encoder = connector->new_encoder;
if (!encoder)
@@ -10239,15 +10609,30 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
return true;
}
+static void
+clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+{
+ struct drm_crtc_state tmp_state;
+
+ /* Clear only the intel specific part of the crtc state */
+ tmp_state = crtc_state->base;
+ memset(crtc_state, 0, sizeof(*crtc_state));
+ crtc_state->base = tmp_state;
+}
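This is the usual save/memset/restore idiom for zeroing only the driver-private
tail of a subclassed state object; a generic sketch with hypothetical types:
struct example_state {
 struct drm_crtc_state base; /* core state, must survive the wipe */
 int driver_private; /* driver-specific fields, to be zeroed */
};
static void example_clear(struct example_state *s)
{
 struct drm_crtc_state tmp = s->base;
 memset(s, 0, sizeof(*s));
 s->base = tmp;
}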
+
static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode,
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_crtc_state *pipe_config;
int plane_bpp, ret = -EINVAL;
+ int i;
bool retry = true;
if (!check_encoder_cloning(to_intel_crtc(crtc))) {
@@ -10260,10 +10645,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return ERR_PTR(-EINVAL);
}
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
- if (!pipe_config)
- return ERR_PTR(-ENOMEM);
+ pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+ clear_intel_crtc_state(pipe_config);
+
+ pipe_config->base.crtc = crtc;
drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
drm_mode_copy(&pipe_config->base.mode, mode);
@@ -10318,11 +10706,17 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- for_each_intel_encoder(dev, encoder) {
+ for (i = 0; i < state->num_connector; i++) {
+ connector = to_intel_connector(state->connectors[i]);
+ if (!connector)
+ continue;
- if (&encoder->new_crtc->base != crtc)
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
@@ -10358,7 +10752,6 @@ encoder_retry:
return pipe_config;
fail:
- kfree(pipe_config);
return ERR_PTR(ret);
}
@@ -10380,8 +10773,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
* to be part of the prepare_pipes mask. We don't (yet) support global
* modeset across multiple crtcs, so modeset_pipes will only have one
* bit set at most. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder == &connector->new_encoder->base)
continue;
@@ -10412,7 +10804,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
/* Check for pipes that will be enabled/disabled ... */
for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.enabled == intel_crtc->new_enabled)
+ if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
continue;
if (!intel_crtc->new_enabled)
@@ -10487,10 +10879,10 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
/* Double check state. */
for_each_intel_crtc(dev, intel_crtc) {
- WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+ WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
intel_crtc->new_config != intel_crtc->config);
- WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
+ WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -10750,7 +11142,7 @@ static void check_wm_state(struct drm_device *dev)
continue;
/* planes */
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
hw_entry = &hw_ddb.plane[pipe][plane];
sw_entry = &sw_ddb->plane[pipe][plane];
@@ -10784,8 +11176,7 @@ check_connector_state(struct drm_device *dev)
{
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* This also checks the encoder/connector hw state with the
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
@@ -10815,8 +11206,7 @@ check_encoder_state(struct drm_device *dev)
I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder != &encoder->base)
continue;
enabled = true;
@@ -10877,7 +11267,7 @@ check_crtc_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
- I915_STATE_WARN(crtc->active && !crtc->base.enabled,
+ I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
"active crtc, but not enabled in sw tracking\n");
for_each_intel_encoder(dev, encoder) {
@@ -10891,9 +11281,10 @@ check_crtc_state(struct drm_device *dev)
I915_STATE_WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
- I915_STATE_WARN(enabled != crtc->base.enabled,
+ I915_STATE_WARN(enabled != crtc->base.state->enable,
"crtc's computed enabled state doesn't match tracked enabled state "
- "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+ "(expected %i, found %i)\n", enabled,
+ crtc->base.state->enable);
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
@@ -10957,7 +11348,7 @@ check_shared_dpll_state(struct drm_device *dev)
pll->on, active);
for_each_intel_crtc(dev, crtc) {
- if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
+ if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
@@ -11039,17 +11430,30 @@ static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
+ struct drm_atomic_state *state,
unsigned *modeset_pipes,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
+ struct drm_device *dev = crtc->dev;
struct intel_crtc_state *pipe_config = NULL;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ERR_PTR(ret);
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
- if ((*modeset_pipes) == 0)
- goto out;
+ for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
+ pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ pipe_config->base.enable = false;
+ }
/*
* Note this needs changes when we start tracking multiple modes
@@ -11057,15 +11461,21 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
- if (IS_ERR(pipe_config)) {
- goto out;
+ for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
+ /* FIXME: For now we still expect modeset_pipes to have at most
+ * one bit set. */
+ if (WARN_ON(&intel_crtc->base != crtc))
+ continue;
+
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
}
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
-out:
- return pipe_config;
+ return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
}
static int __intel_set_mode_setup_plls(struct drm_device *dev,
@@ -11109,6 +11519,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
+ struct intel_crtc_state *crtc_state_copy = NULL;
struct intel_crtc *intel_crtc;
int ret = 0;
@@ -11116,6 +11527,12 @@ static int __intel_set_mode(struct drm_crtc *crtc,
if (!saved_mode)
return -ENOMEM;
+ crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
+ if (!crtc_state_copy) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
*saved_mode = crtc->mode;
if (modeset_pipes)
@@ -11143,7 +11560,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
intel_crtc_disable(&intel_crtc->base);
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
- if (intel_crtc->base.enabled)
+ if (intel_crtc->base.state->enable)
dev_priv->display.crtc_disable(&intel_crtc->base);
}
@@ -11173,7 +11590,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 * update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- modeset_update_crtc_power_domains(dev);
+ modeset_update_crtc_power_domains(pipe_config->base.state);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
@@ -11199,9 +11616,25 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* FIXME: add subpixel order */
done:
- if (ret && crtc->enabled)
+ if (ret && crtc->state->enable)
crtc->mode = *saved_mode;
+ if (ret == 0 && pipe_config) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* The pipe_config will be freed with the atomic state, so
+ * make a copy. */
+ memcpy(crtc_state_copy, intel_crtc->config,
+ sizeof(*crtc_state_copy));
+ intel_crtc->config = crtc_state_copy;
+ intel_crtc->base.state = &crtc_state_copy->base;
+
+ if (modeset_pipes)
+ intel_crtc->new_config = intel_crtc->config;
+ } else {
+ kfree(crtc_state_copy);
+ }
+
kfree(saved_mode);
return ret;
}
@@ -11227,27 +11660,81 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc,
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+ int x, int y, struct drm_framebuffer *fb,
+ struct drm_atomic_state *state)
{
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
+ int ret = 0;
- pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+ pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
- if (IS_ERR(pipe_config))
- return PTR_ERR(pipe_config);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto out;
+ }
- return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
+ ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
+
+out:
+ return ret;
}
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
+ crtc->base.id);
+ return;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ /* The force restore path in the HW readout code relies on the staged
+ * config still keeping the user requested config while the actual
+ * state has been overwritten by the configuration read from HW. We
+ * need to copy the staged config to the atomic state, otherwise the
+ * mode set will just reapply the state the HW is already in. */
+ for_each_intel_encoder(dev, encoder) {
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->new_encoder != encoder)
+ continue;
+
+ connector_state = drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state)) {
+ DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
+ connector->base.base.id,
+ connector->base.name,
+ PTR_ERR(connector_state));
+ continue;
+ }
+
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &encoder->base;
+ }
+ }
+
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
+ state);
+
+ drm_atomic_state_free(state);
}
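intel_crtc_restore_mode() follows the one-shot drm_atomic_state lifecycle used
in several places in this patch: allocate, attach the acquire context, populate
the connector states, hand the state to intel_set_mode(), then free it. A
condensed sketch (hypothetical wrapper, error handling elided):
static void example_one_shot_modeset(struct drm_crtc *crtc,
 struct drm_modeset_acquire_ctx *ctx)
{
 struct drm_atomic_state *state = drm_atomic_state_alloc(crtc->dev);
 if (!state)
 return;
 state->acquire_ctx = ctx;
 /* ... drm_atomic_get_connector_state() plus crtc/encoder setup ... */
 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
 crtc->primary->fb, state);
 drm_atomic_state_free(state);
}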
#undef for_each_intel_crtc_masked
@@ -11295,7 +11782,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
*/
count = 0;
for_each_crtc(dev, crtc) {
- config->save_crtc_enabled[count++] = crtc->enabled;
+ config->save_crtc_enabled[count++] = crtc->state->enable;
}
count = 0;
@@ -11336,7 +11823,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(config->save_connector_encoders[count++]);
}
@@ -11416,9 +11903,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
- struct intel_set_config *config)
+ struct intel_set_config *config,
+ struct drm_atomic_state *state)
{
struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
int ro;
@@ -11428,8 +11917,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* Otherwise traverse passed in connector list and get encoders
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
@@ -11454,15 +11942,16 @@ intel_modeset_stage_output_state(struct drm_device *dev,
if (&connector->new_encoder->base != connector->base.encoder) {
- DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
+ connector->base.base.id,
+ connector->base.name);
config->mode_changed = true;
}
}
/* connector->new_encoder is now updated for all connectors. */
/* Update crtc of enabled connectors. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
struct drm_crtc *new_crtc;
if (!connector->new_encoder)
@@ -11482,6 +11971,14 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ connector_state->crtc = new_crtc;
+ connector_state->best_encoder = &connector->new_encoder->base;
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
connector->base.name,
@@ -11491,9 +11988,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Check for any encoders that needs to be disabled. */
for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
num_connectors++;
@@ -11508,16 +12003,25 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
- DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n",
+ encoder->base.base.id,
+ encoder->base.name);
config->mode_changed = true;
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
- if (connector->new_encoder)
+ for_each_intel_connector(dev, connector) {
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ if (connector->new_encoder) {
if (connector->new_encoder != connector->encoder)
connector->encoder = connector->new_encoder;
+ } else {
+ connector_state->crtc = NULL;
+ }
}
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -11529,8 +12033,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
}
- if (crtc->new_enabled != crtc->base.enabled) {
- DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+ if (crtc->new_enabled != crtc->base.state->enable) {
+ DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n",
+ crtc->base.base.id,
crtc->new_enabled ? "en" : "dis");
config->mode_changed = true;
}
@@ -11553,7 +12058,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc)
DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
pipe_name(crtc->pipe));
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder &&
connector->new_encoder->new_crtc == crtc)
connector->new_encoder = NULL;
@@ -11572,6 +12077,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_mode_set save_set;
+ struct drm_atomic_state *state = NULL;
struct intel_set_config *config;
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
@@ -11616,12 +12122,20 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
* such cases. */
intel_set_config_compute_mode_changes(set, config);
- ret = intel_modeset_stage_output_state(dev, set, config);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_config;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ ret = intel_modeset_stage_output_state(dev, set, config, state);
if (ret)
goto fail;
pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
- set->fb,
+ set->fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
@@ -11641,10 +12155,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
*/
}
- /* set_mode will free it in the mode_changed case */
- if (!config->mode_changed)
- kfree(pipe_config);
-
intel_update_pipe_size(to_intel_crtc(set->crtc));
if (config->mode_changed) {
@@ -11690,6 +12200,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
fail:
intel_set_config_restore_state(dev, config);
+ drm_atomic_state_clear(state);
+
/*
* HACK: if the pipe was on, but we didn't have a framebuffer,
* force the pipe off to avoid oopsing in the modeset code
@@ -11702,11 +12214,15 @@ fail:
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
+ save_set.x, save_set.y, save_set.fb,
+ state))
DRM_ERROR("failed to restore config after modeset failure\n");
}
out_config:
+ if (state)
+ drm_atomic_state_free(state);
+
intel_set_config_free(config);
return ret;
}
@@ -11821,6 +12337,28 @@ static void intel_shared_dpll_init(struct drm_device *dev)
}
/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true if the watermarks need to be recalculated, false otherwise.
+ */
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* Update watermarks on tiling changes. */
+ if (!plane->state->fb || !state->fb ||
+ plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+ plane->state->rotation != state->rotation)
+ return true;
+
+ return false;
+}
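Usage sketch (mirrors the call this patch adds to intel_check_primary_plane()
further down; the wrapper name is hypothetical):
static int example_plane_check(struct drm_plane *plane,
 struct intel_plane_state *state,
 struct intel_crtc *intel_crtc)
{
 if (intel_wm_need_update(plane, &state->base))
 intel_crtc->atomic.update_wm = true;
 return 0;
}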
+
+/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
* @fb: framebuffer to prepare for presentation
@@ -11834,7 +12372,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -11868,7 +12407,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
}
if (ret == 0)
@@ -11888,7 +12427,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -11899,7 +12439,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, old_state);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -11944,7 +12484,7 @@ intel_check_primary_plane(struct drm_plane *plane,
*/
if (intel_crtc->primary_enabled &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.crtc == intel_crtc &&
state->base.rotation != BIT(DRM_ROTATE_0)) {
intel_crtc->atomic.disable_fbc = true;
}
@@ -11963,6 +12503,9 @@ intel_check_primary_plane(struct drm_plane *plane,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
intel_crtc->atomic.update_fbc = true;
+
+ if (intel_wm_need_update(plane, &state->base))
+ intel_crtc->atomic.update_wm = true;
}
return 0;
@@ -11977,8 +12520,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
crtc = crtc ? crtc : plane->crtc;
@@ -11988,8 +12529,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- intel_plane->obj = obj;
-
if (intel_crtc->active) {
if (state->visible) {
/* FIXME: kill this fastboot hack */
@@ -12229,17 +12768,14 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- /* we only need to pin inside GTT if cursor is non-phy */
- mutex_lock(&dev->struct_mutex);
- if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+ if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
finish:
if (intel_crtc->active) {
- if (intel_crtc->cursor_width != state->base.crtc_w)
+ if (plane->state->crtc_w != state->base.crtc_w)
intel_crtc->atomic.update_wm = true;
intel_crtc->atomic.fb_bits |=
@@ -12256,7 +12792,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc = state->base.crtc;
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
@@ -12267,8 +12802,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc->cursor_x = state->base.crtc_x;
crtc->cursor_y = state->base.crtc_y;
- intel_plane->obj = obj;
-
if (intel_crtc->cursor_bo == obj)
goto update;
@@ -12282,8 +12815,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
update:
- intel_crtc->cursor_width = state->base.crtc_w;
- intel_crtc->cursor_height = state->base.crtc_h;
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, state->visible);
@@ -12353,6 +12884,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (!crtc_state)
goto fail;
intel_crtc_set_state(intel_crtc, crtc_state);
+ crtc_state->base.crtc = &intel_crtc->base;
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
@@ -12430,9 +12962,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
if (!drmmode_crtc) {
@@ -12502,7 +13031,6 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_connector *connector;
bool dpd_is_edp = false;
intel_lvds_init(dev);
@@ -12513,10 +13041,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (HAS_DDI(dev)) {
int found;
- /* Haswell uses DDI functions to detect digital outputs */
+ /*
+ * Haswell uses DDI functions to detect digital outputs.
+ * On SKL pre-D0 the strap isn't connected, so we assume
+ * it's there.
+ */
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
- /* DDI A only supports eDP */
- if (found)
+ /* WaIgnoreDDIAStrap: skl */
+ if (found ||
+ (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -12633,37 +13166,6 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- /*
- * FIXME: We don't have full atomic support yet, but we want to be
- * able to enable/test plane updates via the atomic interface in the
- * meantime. However as soon as we flip DRIVER_ATOMIC on, the DRM core
- * will take some atomic codepaths to lookup properties during
- * drmModeGetConnector() that unconditionally dereference
- * connector->state.
- *
- * We create a dummy connector state here for each connector to ensure
- * the DRM core doesn't try to dereference a NULL connector->state.
- * The actual connector properties will never be updated or contain
- * useful information, but since we're doing this specifically for
- * testing/debug of the plane operations (and only when a specific
- * kernel module option is given), that shouldn't really matter.
- *
- * Once atomic support for crtc's + connectors lands, this loop should
- * be removed since we'll be setting up real connector state, which
- * will contain Intel-specific properties.
- */
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (!WARN_ON(connector->state)) {
- connector->state =
- kzalloc(sizeof(*connector->state),
- GFP_KERNEL);
- }
- }
- }
-
intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
@@ -12705,52 +13207,100 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
+static u32
+intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format)
+{
+ u32 gen = INTEL_INFO(dev)->gen;
+
+ if (gen >= 9) {
+ /* "The stride in bytes must not exceed the of the size of 8K
+ * pixels and 32K bytes."
+ */
+ return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
+ } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
+ return 32*1024;
+ } else if (gen >= 4) {
+ if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+ return 16*1024;
+ else
+ return 32*1024;
+ } else if (gen >= 3) {
+ if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ /* XXX DSPC is limited to 4k tiled */
+ return 8*1024;
+ }
+}
+
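As a worked illustration of the gen9+ branch above: the cap is the smaller of
8K pixels and 32K bytes, so 8bpp formats end up pixel-limited while 32bpp
formats hit the byte limit. A minimal standalone sketch, with cpp values
standing in for what drm_format_plane_cpp() would return (illustrative only,
not driver code):

	#include <stdio.h>

	/* Mirror of the gen9 computation: min(8192 * cpp, 32768). */
	static int pitch_limit_gen9(int cpp)
	{
		int by_pixels = 8192 * cpp;	/* 8K-pixel limit */
		int by_bytes = 32768;		/* 32K-byte limit */

		return by_pixels < by_bytes ? by_pixels : by_bytes;
	}

	int main(void)
	{
		printf("cpp=1: %d\n", pitch_limit_gen9(1));	/* 8192, pixel-limited */
		printf("cpp=2: %d\n", pitch_limit_gen9(2));	/* 16384, pixel-limited */
		printf("cpp=4: %d\n", pitch_limit_gen9(4));	/* 32768, byte-limited */
		return 0;
	}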
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
- int aligned_height;
- int pitch_limit;
+ unsigned int aligned_height;
int ret;
+ u32 pitch_limit, stride_alignment;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->tiling_mode == I915_TILING_Y) {
- DRM_DEBUG("hardware does not support tiling Y\n");
- return -EINVAL;
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /* Enforce that fb modifier and tiling mode match, but only for
+ * X-tiled. This is needed for FBC. */
+ if (!!(obj->tiling_mode == I915_TILING_X) !=
+ !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+ DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (obj->tiling_mode == I915_TILING_X)
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ else if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
}
- if (mode_cmd->pitches[0] & 63) {
- DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
- mode_cmd->pitches[0]);
+ /* Sanity-check the passed-in modifier. */
+ switch (mode_cmd->modifier[0]) {
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (INTEL_INFO(dev)->gen < 9) {
+ DRM_DEBUG("Unsupported tiling 0x%llx!\n",
+ mode_cmd->modifier[0]);
+ return -EINVAL;
+ }
+ case DRM_FORMAT_MOD_NONE:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
+ mode_cmd->modifier[0]);
return -EINVAL;
}
- if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
- pitch_limit = 32*1024;
- } else if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode)
- pitch_limit = 16*1024;
- else
- pitch_limit = 32*1024;
- } else if (INTEL_INFO(dev)->gen >= 3) {
- if (obj->tiling_mode)
- pitch_limit = 8*1024;
- else
- pitch_limit = 16*1024;
- } else
- /* XXX DSPC is limited to 4k tiled */
- pitch_limit = 8*1024;
+ stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+ mode_cmd->pixel_format);
+ if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
+ DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
+ mode_cmd->pitches[0], stride_alignment);
+ return -EINVAL;
+ }
+ pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+ mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
- DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
- obj->tiling_mode ? "tiled" : "linear",
+ DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+ "tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
return -EINVAL;
}
- if (obj->tiling_mode != I915_TILING_NONE &&
+ if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
mode_cmd->pitches[0] != obj->stride) {
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
mode_cmd->pitches[0], obj->stride);
@@ -12805,7 +13355,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
aligned_height = intel_fb_align_height(dev, mode_cmd->height,
- obj->tiling_mode);
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
@@ -12958,8 +13509,6 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.modeset_global_resources =
- ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
} else if (IS_VALLEYVIEW(dev)) {
@@ -12967,9 +13516,6 @@ static void intel_init_display(struct drm_device *dev)
valleyview_modeset_global_resources;
}
- /* Default just returns -ENODEV to indicate unsupported */
- dev_priv->display.queue_flip = intel_default_queue_flip;
-
switch (INTEL_INFO(dev)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -12992,8 +13538,10 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
case 9:
- dev_priv->display.queue_flip = intel_gen9_queue_flip;
- break;
+ /* Fall through - unsupported since execlist only. */
+ default:
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
}
intel_panel_init_backlight_funcs(dev);
@@ -13212,6 +13760,8 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.allow_fb_modifiers = true;
+
dev->mode_config.funcs = &intel_mode_funcs;
intel_init_quirks(dev);
@@ -13254,7 +13804,7 @@ void intel_modeset_init(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) {
intel_crtc_init(dev, pipe);
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
@@ -13295,7 +13845,7 @@ void intel_modeset_init(struct drm_device *dev)
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
- intel_find_plane_obj(crtc, &crtc->plane_config);
+ intel_find_initial_plane_obj(crtc, &crtc->plane_config);
}
}
}
@@ -13310,9 +13860,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
@@ -13323,7 +13871,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
return;
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
- intel_release_load_detect_pipe(crt, &load_detect_temp);
+ intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
static bool
@@ -13357,11 +13905,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* restore vblank interrupts to correct state */
+ drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) {
update_scanline_offset(crtc);
- drm_vblank_on(dev, crtc->pipe);
- } else
- drm_vblank_off(dev, crtc->pipe);
+ drm_crtc_vblank_on(&crtc->base);
+ }
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
@@ -13383,8 +13931,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
crtc->plane = plane;
/* ... and break all links. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->base.crtc != &crtc->base)
continue;
@@ -13393,14 +13940,14 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
}
/* multiple connectors may have the same encoder:
* handle them and break crtc link separately */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head)
+ for_each_intel_connector(dev, connector)
if (connector->encoder->base.crtc == &crtc->base) {
connector->encoder->base.crtc = NULL;
connector->encoder->connectors_active = false;
}
WARN_ON(crtc->active);
+ crtc->base.state->enable = false;
crtc->base.enabled = false;
}
@@ -13417,7 +13964,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* have active connectors/encoders. */
intel_crtc_update_dpms(&crtc->base);
- if (crtc->active != crtc->base.enabled) {
+ if (crtc->active != crtc->base.state->enable) {
struct intel_encoder *encoder;
/* This can happen either due to bugs in the get_hw_state
@@ -13425,9 +13972,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* pipe A quirk. */
DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
crtc->base.base.id,
- crtc->base.enabled ? "enabled" : "disabled",
+ crtc->base.state->enable ? "enabled" : "disabled",
crtc->active ? "enabled" : "disabled");
+ crtc->base.state->enable = crtc->active;
crtc->base.enabled = crtc->active;
/* Because we only establish the connector -> encoder ->
@@ -13496,9 +14044,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder != encoder)
continue;
connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -13564,6 +14110,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = dev_priv->display.get_pipe_config(crtc,
crtc->config);
+ crtc->base.state->enable = crtc->active;
crtc->base.enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
@@ -13612,8 +14159,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe_name(pipe));
}
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->encoder->connectors_active = true;
@@ -13669,6 +14215,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
"[setup_hw_state]");
}
+ intel_modeset_update_connector_atomic_state(dev);
+
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -13697,8 +14245,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->primary->fb);
+ intel_crtc_restore_mode(crtc);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13712,6 +14259,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
+ int ret;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
@@ -13736,15 +14284,18 @@ void intel_modeset_gem_init(struct drm_device *dev)
* pinned & fenced. When we do the allocation it's too early
* for this.
*/
- mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
- if (intel_pin_and_fence_fb_obj(c->primary,
- c->primary->fb,
- NULL)) {
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(c->primary,
+ c->primary->fb,
+ c->primary->state,
+ NULL);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -13752,7 +14303,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
update_state_fb(c->primary);
}
}
- mutex_unlock(&dev->struct_mutex);
intel_backlight_register(dev);
}
@@ -13793,8 +14343,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_fbc_disable(dev);
- ironlake_teardown_rc6(dev);
-
mutex_unlock(&dev->struct_mutex);
/* flush any delayed tasks or pending work */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a74aaf9242b9..d0237102c27e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -84,6 +84,13 @@ static const struct dp_link_dpll chv_dpll[] = {
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
+/* Skylake supports the following rates */
+static const int gen9_rates[] = { 162000, 216000, 270000,
+ 324000, 432000, 540000 };
+static const int chv_rates[] = { 162000, 202500, 210000, 216000,
+ 243000, 270000, 324000, 405000,
+ 420000, 432000, 540000 };
+static const int default_rates[] = { 162000, 270000, 540000 };
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -118,23 +125,15 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
-int
-intel_dp_max_link_bw(struct intel_dp *intel_dp)
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
- struct drm_device *dev = intel_dp->attached_connector->base.dev;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
- break;
- case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
- INTEL_INFO(dev)->gen >= 8) &&
- intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
- max_link_bw = DP_LINK_BW_5_4;
- else
- max_link_bw = DP_LINK_BW_2_7;
+ case DP_LINK_BW_5_4:
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -210,7 +209,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
target_clock = fixed_mode->clock;
}
- max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
@@ -240,7 +239,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
return v;
}
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@@ -943,8 +942,9 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
size_t txsize, rxsize;
int ret;
- txbuf[0] = msg->request << 4;
- txbuf[1] = msg->address >> 8;
+ txbuf[0] = (msg->request << 4) |
+ ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
txbuf[2] = msg->address & 0xff;
txbuf[3] = msg->size - 1;
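The hunk above widens the AUX header so txbuf[0..2] carry a full 20-bit
address instead of 16 bits. A standalone sketch of the packing, using an
assumed request/address pair (the values are illustrative, not from this
patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t request = 0x9;		/* e.g. a native AUX read */
		uint32_t address = 0x12345;	/* 20-bit AUX address */
		uint8_t size = 16;
		uint8_t txbuf[4];

		/* Same packing as intel_dp_aux_transfer() above. */
		txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
		txbuf[1] = (address >> 8) & 0xff;
		txbuf[2] = address & 0xff;
		txbuf[3] = size - 1;

		printf("%02x %02x %02x %02x\n",
		       txbuf[0], txbuf[1], txbuf[2], txbuf[3]);	/* 91 23 45 0f */
		return 0;
	}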
@@ -952,7 +952,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 1;
+ rxsize = 2; /* 0 or 1 data bytes */
if (WARN_ON(txsize > 20))
return -E2BIG;
@@ -963,8 +963,13 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
- /* Return payload size. */
- ret = msg->size;
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
}
break;
@@ -1075,7 +1080,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
u32 ctrl1;
@@ -1084,19 +1089,35 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
pipe_config->dpll_hw_state.cfgcr2 = 0;
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- switch (link_bw) {
- case DP_LINK_BW_1_62:
+ switch (link_clock / 2) {
+ case 81000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
SKL_DPLL0);
break;
- case DP_LINK_BW_2_7:
+ case 135000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
SKL_DPLL0);
break;
- case DP_LINK_BW_5_4:
+ case 270000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
SKL_DPLL0);
break;
+ case 162000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
+ SKL_DPLL0);
+ break;
+ /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640,
+ * which results in a CDCLK change. Need to handle the CDCLK change
+ * by disabling and re-enabling the pipes. */
+ case 108000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
+ SKL_DPLL0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
+ SKL_DPLL0);
+ break;
}
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
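The switch keys on link_clock / 2 because the ctrl1 encodings are named after
half the DP link rate in MHz: a 540000 kHz (HBR2) port clock hits the 270000
case and selects LINK_RATE_2700, while 162000 kHz (RBR) lands on 81000 and
LINK_RATE_810. A hedged sketch of the mapping, with plain integers standing
in for the register encodings:

	/* Illustrative only: link rate in kHz -> SKL ctrl1 rate label. */
	static int skl_rate_label(int link_clock_khz)
	{
		switch (link_clock_khz / 2) {
		case 81000:  return 810;	/* 1.62 GHz (RBR) */
		case 135000: return 1350;	/* 2.7 GHz (HBR) */
		case 270000: return 2700;	/* 5.4 GHz (HBR2) */
		case 162000: return 1620;	/* 3.24 GHz */
		case 108000: return 1080;	/* 2.16 GHz, needs CDCLK handling */
		case 216000: return 2160;	/* 4.32 GHz, needs CDCLK handling */
		default:     return -1;
		}
	}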
@@ -1117,6 +1138,42 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
}
}
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
+
+ *sink_rates = default_rates;
+
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
+
+static int
+intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+{
+ if (INTEL_INFO(dev)->gen >= 9) {
+ *source_rates = gen9_rates;
+ return ARRAY_SIZE(gen9_rates);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *source_rates = chv_rates;
+ return ARRAY_SIZE(chv_rates);
+ }
+
+ *source_rates = default_rates;
+
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ /* WaDisableHBR2:skl */
+ return (DP_LINK_BW_2_7 >> 3) + 1;
+ else if (INTEL_INFO(dev)->gen >= 8 ||
+ (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ return (DP_LINK_BW_5_4 >> 3) + 1;
+ else
+ return (DP_LINK_BW_2_7 >> 3) + 1;
+}
+
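Both helpers lean on the legacy DPCD link-bw codes doubling as lengths into
default_rates: DP_LINK_BW_1_62 is 0x06, DP_LINK_BW_2_7 is 0x0a and
DP_LINK_BW_5_4 is 0x14, so (code >> 3) + 1 yields 1, 2 or 3 entries. A quick
standalone check of that arithmetic:

	#include <assert.h>

	int main(void)
	{
		assert(((0x06 >> 3) + 1) == 1);	/* 1.62 GHz -> { 162000 } */
		assert(((0x0a >> 3) + 1) == 2);	/* 2.7 GHz  -> { 162000, 270000 } */
		assert(((0x14 >> 3) + 1) == 3);	/* 5.4 GHz  -> all of default_rates */
		return 0;
	}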
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config, int link_bw)
@@ -1150,6 +1207,113 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
+
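intersect_rates() is a standard two-pointer merge over two ascending arrays.
A self-contained sketch of the same logic (the WARN_ON bound check is dropped
for brevity, and the rate tables repeat the ones defined earlier):

	#include <stdio.h>

	static int intersect(const int *a, int alen,
			     const int *b, int blen, int *out)
	{
		int i = 0, j = 0, k = 0;

		while (i < alen && j < blen) {
			if (a[i] == b[j]) {
				out[k++] = a[i];
				i++;
				j++;
			} else if (a[i] < b[j]) {
				i++;
			} else {
				j++;
			}
		}
		return k;
	}

	int main(void)
	{
		static const int src[] = { 162000, 216000, 270000,
					   324000, 432000, 540000 };
		static const int snk[] = { 162000, 270000, 540000 };
		int common[8], i, n;

		n = intersect(src, 6, snk, 3, common);
		for (i = 0; i < n; i++)
			printf("%d\n", common[i]);	/* 162000 270000 540000 */
		return 0;
	}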
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(dev, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
+
+static void snprintf_int_array(char *str, size_t len,
+ const int *array, int nelem)
+{
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < nelem; i++) {
+ int r = snprintf(str, len, "%d,", array[i]);
+ if (r >= len)
+ return;
+ str += r;
+ len -= r;
+ }
+}
+
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len, common_len;
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ char str[128]; /* FIXME: too big for stack? */
+
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ source_len = intel_dp_source_rates(dev, &source_rates);
+ snprintf_int_array(str, sizeof(str), source_rates, source_len);
+ DRM_DEBUG_KMS("source rates: %s\n", str);
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
+ DRM_DEBUG_KMS("sink rates: %s\n", str);
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+ snprintf_int_array(str, sizeof(str), common_rates, common_len);
+ DRM_DEBUG_KMS("common rates: %s\n", str);
+}
+
+static int rate_to_index(int find, const int *rates)
+{
+ int i = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
+ if (find == rates[i])
+ break;
+
+ return i;
+}
+
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+ int rates[DP_MAX_SUPPORTED_RATES] = {};
+ int len;
+
+ len = intel_dp_common_rates(intel_dp, rates);
+ if (WARN_ON(len <= 0))
+ return 162000;
+
+ return rates[rate_to_index(0, rates) - 1];
+}
+
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+ return rate_to_index(rate, intel_dp->sink_rates);
+}
+
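Note the idiom in intel_dp_max_link_rate() above: common_rates is
zero-initialized and sorted ascending, so rate_to_index(0, rates) returns the
index of the first unused slot, i.e. the array length, and rates[len - 1] is
the highest common rate. Spelled out as a hedged fragment (assumes the
helpers and macro above):

	int rates[DP_MAX_SUPPORTED_RATES] = { 162000, 270000, 540000 };
	int len = rate_to_index(0, rates);	/* 3: index of the first zero */
	int max = rates[len - 1];		/* 540000 */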
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1159,17 +1323,25 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *intel_crtc = encoder->new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int min_lane_count = 1;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift...*/
int min_clock = 0;
- int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
+ int max_clock;
int bpp, mode_rate;
- static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
+ int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+ int common_len;
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+
+ /* No common link rates between source and sink */
+ WARN_ON(common_len <= 0);
+
+ max_clock = common_len - 1;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -1193,8 +1365,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return false;
DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock],
+ "max bw %d pixel clock %iKHz\n",
+ max_lane_count, common_rates[max_clock],
adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
@@ -1223,8 +1395,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
bpp);
for (clock = min_clock; clock <= max_clock; clock++) {
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+ for (lane_count = min_lane_count;
+ lane_count <= max_lane_count;
+ lane_count <<= 1) {
+
+ link_clock = common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1253,10 +1428,20 @@ found:
if (intel_dp->color_range)
pipe_config->limited_color_range = true;
- intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
+
+ if (intel_dp->num_sink_rates) {
+ intel_dp->link_bw = 0;
+ intel_dp->rate_select =
+ intel_dp_rate_select(intel_dp, common_rates[clock]);
+ } else {
+ intel_dp->link_bw =
+ drm_dp_link_rate_to_bw_code(common_rates[clock]);
+ intel_dp->rate_select = 0;
+ }
+
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = common_rates[clock];
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -1279,7 +1464,7 @@ found:
}
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+ skl_edp_set_pll_config(pipe_config, common_rates[clock]);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
@@ -2557,11 +2742,6 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
/* Program Tx lane latency optimal setting*/
for (i = 0; i < 4; i++) {
- /* Set the latency optimal bit */
- data = (i == 1) ? 0x0 : 0x6;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
- data << DPIO_FRC_LATENCY_SHFIT);
-
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@ -2691,11 +2871,14 @@ static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (IS_VALLEYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2719,6 +2902,8 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
@@ -3201,6 +3386,9 @@ intel_hsw_signal_levels(uint8_t train_set)
return DDI_BUF_TRANS_SELECT(7);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(8);
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DDI_BUF_TRANS_SELECT(9);
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
@@ -3358,6 +3546,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ if (intel_dp->num_sink_rates)
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+ &intel_dp->rate_select, 1);
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
@@ -3570,6 +3761,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint8_t rev;
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
@@ -3601,6 +3793,32 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
} else
intel_dp->use_tps3 = false;
+ /* Intermediate frequency support */
+ if (is_edp(intel_dp) &&
+ (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+ (rev >= 0x03)) { /* eDP 1.4 or higher */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+ int i;
+
+ intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SUPPORTED_LINK_RATES,
+ sink_rates,
+ sizeof(sink_rates));
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ int val = le16_to_cpu(sink_rates[i]);
+
+ if (val == 0)
+ break;
+
+ intel_dp->sink_rates[i] = val * 200;
+ }
+ intel_dp->num_sink_rates = i;
+ }
+
+ intel_dp_print_rates(intel_dp);
+
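Per eDP 1.4, each DP_SUPPORTED_LINK_RATES entry is a little-endian 16-bit
value in units of 200 kHz, which is why the loop above multiplies by 200 and
stops at the first zero entry. A quick standalone conversion check:

	#include <stdio.h>

	int main(void)
	{
		/* Example table contents; a zero terminates the list. */
		unsigned short raw[] = { 810, 1350, 2700, 0 };
		int i;

		for (i = 0; raw[i]; i++)
			printf("%u kHz\n", raw[i] * 200);	/* 162000 270000 540000 */
		return 0;
	}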
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -3803,7 +4021,7 @@ go_again:
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
-void
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4390,6 +4608,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4736,6 +4955,18 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(pp_div_reg));
}
+/**
+ * intel_dp_set_drrs_state - program registers for RR switch to take effect
+ * @dev: DRM device
+ * @refresh_rate: RR to be programmed
+ *
+ * This function gets called when refresh rate (RR) has to be changed from
+ * one frequency to another. Switches can be between high and low RR
+ * supported by the panel or to any other RR based on media playback (in
+ * this case, the RR value needs to be passed from user space).
+ *
+ * The caller of this function needs to take a lock on dev_priv->drrs.
+ */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4764,7 +4995,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
- intel_crtc = encoder->new_crtc;
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
@@ -4793,14 +5024,32 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+ switch (index) {
+ case DRRS_HIGH_RR:
+ intel_dp_set_m_n(intel_crtc, M1_N1);
+ break;
+ case DRRS_LOW_RR:
+ intel_dp_set_m_n(intel_crtc, M2_N2);
+ break;
+ case DRRS_MAX_RR:
+ default:
+ DRM_ERROR("Unsupported refreshrate type\n");
+ }
+ } else if (INTEL_INFO(dev)->gen > 6) {
reg = PIPECONF(intel_crtc->config->cpu_transcoder);
val = I915_READ(reg);
+
if (index > DRRS_HIGH_RR) {
- val |= PIPECONF_EDP_RR_MODE_SWITCH;
- intel_dp_set_m_n(intel_crtc);
+ if (IS_VALLEYVIEW(dev))
+ val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val |= PIPECONF_EDP_RR_MODE_SWITCH;
} else {
- val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+ if (IS_VALLEYVIEW(dev))
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
I915_WRITE(reg, val);
}
@@ -4810,6 +5059,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
+/**
+ * intel_edp_drrs_enable - init drrs struct if supported
+ * @intel_dp: DP struct
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4837,6 +5092,11 @@ unlock:
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4892,10 +5152,20 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
downclock_mode->vrefresh);
unlock:
-
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_invalidate - Invalidate DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is a disturbance on screen (due to cursor movement/time
+ * update, etc.), DRRS needs to be invalidated, i.e. we need to switch
+ * to the high RR.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
@@ -4903,15 +5173,21 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!dev_priv->drrs.dp)
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
+ cancel_delayed_work(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
- cancel_delayed_work_sync(&dev_priv->drrs.work);
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
@@ -4923,6 +5199,17 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_flush - Flush DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is no movement on screen, DRRS work can be scheduled.
+ * This DRRS work is responsible for setting relevant registers after a
+ * timeout of 1 second.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
void intel_edp_drrs_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
@@ -4930,16 +5217,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!dev_priv->drrs.dp)
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
+ cancel_delayed_work(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
- cancel_delayed_work_sync(&dev_priv->drrs.work);
-
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
!dev_priv->drrs.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->drrs.work,
@@ -4947,6 +5239,56 @@ void intel_edp_drrs_flush(struct drm_device *dev,
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching dynamically between low and high refresh
+ * rates based on the usage scenario. This feature is applicable to
+ * internal panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * would list multiple refresh rates for one resolution.
+ *
+ * DRRS is of two types: static and seamless.
+ * Static DRRS involves changing refresh rate (RR) by doing a full modeset
+ * (may appear as a blink on screen) and is used in dock-undock scenarios.
+ * Seamless DRRS involves changing RR without any visual effect to the user
+ * and can be used during normal system usage. This is done by programming
+ * certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * eDP DRRS:
+ * The implementation is based on frontbuffer tracking implementation.
+ * When there is a disturbance on the screen triggered by user activity or a
+ * periodic system activity, DRRS is disabled (RR is changed to high RR).
+ * When there is no movement on screen, after a timeout of 1 second, a switch
+ * to low RR is made.
+ * For integration with frontbuffer tracking code,
+ * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
+
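A hedged sketch of the call pattern the paragraph above describes; the two
wrapper names here are hypothetical stand-ins for the real frontbuffer
tracking hooks, which are not part of this hunk:

	/* Illustrative only: how a frontbuffer-tracking site drives DRRS. */
	static void example_fb_write_begin(struct drm_device *dev,
					   unsigned frontbuffer_bits)
	{
		/* Screen is about to change: force the high refresh rate. */
		intel_edp_drrs_invalidate(dev, frontbuffer_bits);
	}

	static void example_fb_write_end(struct drm_device *dev,
					 unsigned frontbuffer_bits)
	{
		/* Rendering done: (re)arm the 1 second downclock timer. */
		intel_edp_drrs_flush(dev, frontbuffer_bits);
	}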
+/**
+ * intel_dp_drrs_init - Init basic DRRS work and mutex.
+ * @intel_connector: eDP connector
+ * @fixed_mode: preferred mode of panel
+ *
+ * This function is called only once at driver load to initialize basic
+ * DRRS state.
+ *
+ * Returns:
+ * Downclock mode if the panel supports it, otherwise NULL.
+ * DRRS support is determined by the presence of downclock mode (apart
+ * from VBT setting).
+ */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
struct drm_display_mode *fixed_mode)
@@ -4956,6 +5298,9 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
+ mutex_init(&dev_priv->drrs.mutex);
+
if (INTEL_INFO(dev)->gen <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
@@ -4970,14 +5315,10 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
(dev, fixed_mode, connector);
if (!downclock_mode) {
- DRM_DEBUG_KMS("DRRS not supported\n");
+ DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
}
- INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
-
- mutex_init(&dev_priv->drrs.mutex);
-
dev_priv->drrs.type = dev_priv->vbt.drrs_type;
dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
@@ -5000,8 +5341,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
-
if (!is_edp(intel_dp))
return true;
@@ -5251,7 +5590,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9f67a379a9a5..5cb47482d29f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -36,11 +36,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = encoder->base.dev;
- int bpp;
- int lane_count, slots;
+ struct drm_atomic_state *state;
+ int bpp, i;
+ int lane_count, slots, rate;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_connector *found = NULL, *intel_connector;
+ struct intel_connector *found = NULL;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
@@ -52,15 +52,30 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
* seem to suggest we should do otherwise.
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
- intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+
+ rate = intel_dp_max_link_rate(intel_dp);
+
+ if (intel_dp->num_sink_rates) {
+ intel_dp->link_bw = 0;
+ intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
+ } else {
+ intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
+ intel_dp->rate_select = 0;
+ }
+
intel_dp->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = rate;
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
- if (intel_connector->new_encoder == encoder) {
- found = intel_connector;
+ state = pipe_config->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ if (state->connector_states[i]->best_encoder == &encoder->base) {
+ found = to_intel_connector(state->connectors[i]);
break;
}
}
@@ -140,7 +155,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, intel_connector) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
@@ -317,6 +332,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -399,7 +415,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct drm_connector *connector;
int i;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba243db35840..897f17db08af 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,6 +35,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_atomic.h>
/**
* _wait_for - magic (register) wait macro
@@ -53,8 +54,8 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
+ if ((W) && drm_can_sleep()) { \
+ usleep_range((W)*1000, (W)*2000); \
} else { \
cpu_relax(); \
} \
@@ -255,6 +256,7 @@ struct intel_plane_state {
};
struct intel_initial_plane_config {
+ struct intel_framebuffer *fb;
unsigned int tiling;
int size;
u32 base;
@@ -460,7 +462,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;
@@ -497,16 +498,20 @@ struct intel_plane_wm_parameters {
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
+ u64 tiling;
+ unsigned int rotation;
};
struct intel_plane {
struct drm_plane base;
int plane;
enum pipe pipe;
- struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
+ /* FIXME convert to properties */
+ struct drm_intel_sprite_colorkey ckey;
+
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
* as the other pieces of the struct may not reflect the values we want
@@ -523,7 +528,6 @@ struct intel_plane {
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -534,10 +538,6 @@ struct intel_plane {
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
- int (*update_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
- void (*get_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
};
struct intel_watermark_params {
@@ -560,6 +560,7 @@ struct cxsr_latency {
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
@@ -589,6 +590,26 @@ struct intel_hdmi {
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
+/*
+ * enum link_m_n_set:
+ * When the platform provides two sets of M_N registers for DP, we can
+ * program them and switch between them in case of DRRS.
+ * But when only one such register is provided, we have to program the
+ * required divider value on that register itself based on the DRRS state.
+ *
+ * M1_N1 : Program dp_m_n on M1_N1 registers
+ * dp_m2_n2 on M2_N2 registers (If supported)
+ *
+ * M2_N2 : Program dp_m2_n2 on M1_N1 registers
+ * M2_N2 registers are not supported
+ */
+
+enum link_m_n_set {
+ /* Sets the m1_n1 and m2_n2 */
+ M1_N1 = 0,
+ M2_N2
+};
+
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
@@ -598,10 +619,14 @@ struct intel_dp {
uint32_t color_range;
bool color_range_auto;
uint8_t link_bw;
+ uint8_t rate_select;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
+ uint8_t num_sink_rates;
+ int sink_rates[DP_MAX_SUPPORTED_RATES];
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -707,7 +732,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
struct drm_crtc *crtc;
- struct drm_i915_gem_object *old_fb_obj;
+ struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
atomic_t pending;
@@ -814,7 +839,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -849,7 +875,8 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
@@ -874,10 +901,14 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
-int intel_fb_align_height(struct drm_device *dev, int height,
- unsigned int tiling);
+unsigned int intel_fb_align_height(struct drm_device *dev,
+ unsigned int height,
+ uint32_t pixel_format,
+ uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format);
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
@@ -896,6 +927,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
+int intel_connector_init(struct intel_connector *);
+struct intel_connector *intel_connector_alloc(void);
void intel_connector_dpms(struct drm_connector *, int mode);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_modeset_check_state(struct drm_device *dev);
@@ -925,11 +958,12 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old);
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined);
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -939,9 +973,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state);
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
@@ -951,6 +987,19 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val);
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier);
+
+static inline bool
+intel_rotation_90_or_270(unsigned int rotation)
+{
+ return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+}
+
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -990,7 +1039,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc);
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
@@ -1005,6 +1054,9 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj);
+
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1014,7 +1066,6 @@ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
@@ -1029,17 +1080,11 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
-int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
-int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int intel_disable_plane(struct drm_plane *plane);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
void intel_edp_drrs_disable(struct intel_dp *intel_dp);
@@ -1094,7 +1139,11 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_fbc_update(struct drm_device *dev);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_device *dev);
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1210,8 +1259,9 @@ void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
-void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1228,14 +1278,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-int intel_plane_set_property(struct drm_plane *plane,
- struct drm_property *prop,
- uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1258,6 +1303,17 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+static inline struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+ if (IS_ERR(crtc_state))
+ return ERR_PTR(PTR_ERR(crtc_state));
+
+ return to_intel_crtc_state(crtc_state);
+}
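A minimal usage sketch of the new helper, following the usual atomic
error-handling pattern (the surrounding state and intel_crtc variables are
assumed):

	struct intel_crtc_state *pipe_config =
		intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(pipe_config))
		return PTR_ERR(pipe_config);
	/* ... mutate pipe_config; the atomic core tracks the acquired lock. */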
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 10ab68457ca8..51966426addf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -854,7 +854,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
/* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(port), val);
+ I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
@@ -975,6 +975,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
void intel_dsi_init(struct drm_device *dev)
@@ -1006,7 +1007,7 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_dsi)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dsi);
return;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index d8579510beb0..770040ff486e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,6 +393,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
@@ -468,7 +469,7 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_dvo)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dvo);
return;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 624d1d92d284..4165ce0644f7 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -78,7 +78,8 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
dev_priv->fbc.enabled = true;
- cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+ /* Note: fbc.threshold == 1 for i8xx */
+ cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -173,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void snb_fbc_blit_update(struct drm_device *dev)
+static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
-
- /* Blitter is part of Media powerwell on VLV. No impact of
- * his param in other platforms for now */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
-
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ POSTING_READ(MSG_FBC_REND_STATE);
}
static void ilk_fbc_enable(struct drm_crtc *crtc)
@@ -238,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
}
+ intel_fbc_nuke(dev_priv);
+
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -319,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
+ intel_fbc_nuke(dev_priv);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -339,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->fbc.enabled;
}
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_GEN8(dev))
- return;
-
- if (!intel_fbc_enabled(dev))
- return;
-
- I915_WRITE(MSG_FBC_REND_STATE, value);
-}
-
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
@@ -368,7 +338,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
- dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
@@ -459,7 +429,7 @@ void intel_fbc_disable(struct drm_device *dev)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.plane = -1;
+ dev_priv->fbc.crtc = NULL;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
@@ -472,6 +442,43 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
return true;
}
+static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+{
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ enum pipe pipe;
+ bool pipe_a_only = false, one_pipe_only = false;
+
+ if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+ pipe_a_only = true;
+ else if (INTEL_INFO(dev_priv)->gen <= 4)
+ one_pipe_only = true;
+
+ for_each_pipe(dev_priv, pipe) {
+ tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (one_pipe_only && crtc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ return NULL;
+ }
+ crtc = tmp_crtc;
+ }
+
+ if (pipe_a_only)
+ break;
+ }
+
+ if (!crtc || crtc->primary->fb == NULL) {
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
+ return NULL;
+ }
+
+ return crtc;
+}
+
/**
* intel_fbc_update - enable/disable FBC as needed
* @dev: the drm_device
@@ -494,22 +501,30 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
void intel_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
- if (!HAS_FBC(dev)) {
- set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+ if (!HAS_FBC(dev))
return;
+
+ /* disable framebuffer compression in vGPU */
+ if (intel_vgpu_active(dev))
+ i915.enable_fbc = 0;
+
+ if (i915.enable_fbc < 0) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
}
- if (!i915.powersave) {
+ if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
- return;
+ goto out_disable;
}
/*
@@ -521,39 +536,15 @@ void intel_fbc_update(struct drm_device *dev)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
- for_each_crtc(dev, tmp_crtc) {
- if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
- if (crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
+ crtc = intel_fbc_find_crtc(dev_priv);
+ if (!crtc)
goto out_disable;
- }
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
- goto out_disable;
- }
- if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- goto out_disable;
- }
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
@@ -617,7 +608,7 @@ void intel_fbc_update(struct drm_device *dev)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->fbc.plane == intel_crtc->plane &&
+ if (dev_priv->fbc.crtc == intel_crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
@@ -663,6 +654,44 @@ out_disable:
i915_gem_stolen_cleanup_compression(dev);
}
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned int fbc_bits;
+
+ if (origin == ORIGIN_GTT)
+ return;
+
+ if (dev_priv->fbc.enabled)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
+ else if (dev_priv->fbc.fbc_work)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
+ to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+ else
+ fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
+
+ dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+
+ if (dev_priv->fbc.busy_bits)
+ intel_fbc_disable(dev);
+}
+
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!dev_priv->fbc.busy_bits)
+ return;
+
+ dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+
+ if (!dev_priv->fbc.busy_bits)
+ intel_fbc_update(dev);
+}
+
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -671,11 +700,22 @@ out_disable:
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
+ enum pipe pipe;
+
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
+ dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
return;
}
+ for_each_pipe(dev_priv, pipe) {
+ dev_priv->fbc.possible_framebuffer_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(pipe);
+
+ if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+ break;
+ }
+
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = gen7_fbc_enable;
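intel_fbc_invalidate() and intel_fbc_flush() above replace the old unconditional software flush with per-bit bookkeeping: a CPU write sets busy bits and disables compression, and FBC is only reconsidered once every dirtied bit has been flushed. A standalone toy model of that cycle (simplified types, not the driver's):

	#include <stdio.h>

	static unsigned int busy_bits; /* frontbuffer bits with pending writes */

	static void fbc_invalidate(unsigned int frontbuffer_bits)
	{
		busy_bits |= frontbuffer_bits;
		if (busy_bits)
			printf("disable FBC (busy 0x%x)\n", busy_bits);
	}

	static void fbc_flush(unsigned int frontbuffer_bits)
	{
		if (!busy_bits)
			return;

		busy_bits &= ~frontbuffer_bits;
		if (!busy_bits)
			printf("all writes landed, re-run the update\n");
	}

	int main(void)
	{
		fbc_invalidate(0x1);	/* CPU dirties pipe A primary */
		fbc_invalidate(0x2);	/* and a second plane */
		fbc_flush(0x1);		/* still busy */
		fbc_flush(0x2);		/* last bit clears, FBC may resume */
		return 0;
	}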
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 3001a8674611..4e7e7da2e03b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -71,6 +71,31 @@ static int intel_fbdev_set_par(struct fb_info *info)
return ret;
}
+static int intel_fbdev_blank(int blank, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_blank(blank, info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -79,7 +104,7 @@ static struct fb_ops intelfb_ops = {
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
+ .fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
@@ -126,7 +151,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
}
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
@@ -594,7 +619,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
- plane_config->tiling);
+ fb->base.pixel_format,
+ fb->base.modifier[0]);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
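intel_fb_align_height() now takes the pixel format and framebuffer modifier instead of a bare tiling flag, since on gen9 the Y-tile height varies with the format's bytes per pixel. A hedged sketch of the underlying alignment, with the classic tile heights stated as assumptions:

	/* Assumed tile heights: linear = 1 row, X-tile = 8 rows, legacy
	 * Y-tile = 32 rows; gen9 Ys/Yf tiles depend on cpp, which is why
	 * the real helper needs the pixel format too. */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static int align_height_sketch(int height, int tile_height)
	{
		return ALIGN_UP(height, tile_height); /* power-of-two rows */
	}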
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 73cb6e036445..a20cffb78c0f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -110,16 +110,11 @@ static void intel_mark_fb_busy(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- if (!i915.powersave)
- return;
-
for_each_pipe(dev_priv, pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
intel_increase_pllclock(dev, pipe);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
}
}
@@ -127,6 +122,7 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
+ * @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
@@ -135,7 +131,8 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -158,6 +155,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
+ intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
}
/**
@@ -185,16 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
-
- /*
- * FIXME: Unconditional fbc flushing here is a rather gross hack and
- * needs to be reworked into a proper frontbuffer tracking scheme like
- * psr employs.
- */
- if (dev_priv->fbc.need_sw_cache_clean) {
- dev_priv->fbc.need_sw_cache_clean = false;
- bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
- }
+ intel_fbc_flush(dev_priv, frontbuffer_bits);
}
/**
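The frontbuffer_bits threaded through invalidate/flush form a per-pipe, per-plane bitmask. The layout is roughly four bits per pipe (primary, cursor, sprite, overlay); a sketch of that scheme with assumed names, not the driver's INTEL_FRONTBUFFER_* definitions:

	#define FB_BITS_PER_PIPE	4	/* assumed layout */
	#define FB_PRIMARY(pipe)	(1 << (FB_BITS_PER_PIPE * (pipe)))
	#define FB_CURSOR(pipe)		(1 << (1 + FB_BITS_PER_PIPE * (pipe)))
	#define FB_SPRITE(pipe)		(1 << (2 + FB_BITS_PER_PIPE * (pipe)))
	#define FB_OVERLAY(pipe)	(1 << (3 + FB_BITS_PER_PIPE * (pipe)))
	#define FB_ALL_MASK(pipe)	(0xf << (FB_BITS_PER_PIPE * (pipe)))

	/* e.g. pipe B primary is bit 4, so FB_ALL_MASK(1) is 0xf0 and a
	 * flush of 0x10 clears only that plane's busy bit. */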
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 995c5b261f4f..bfbe07b6ddce 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -951,19 +951,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
+static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_atomic_state *state;
struct intel_encoder *encoder;
+ struct drm_connector_state *connector_state;
int count = 0, count_hdmi = 0;
+ int i;
if (HAS_GMCH_DISPLAY(dev))
return false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
count++;
}
@@ -1020,7 +1031,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
- hdmi_12bpc_possible(encoder->new_crtc)) {
+ hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1504,11 +1515,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Program Tx latency optimal setting */
for (i = 0; i < 4; i++) {
- /* Set the latency optimal bit */
- data = (i == 1) ? 0x0 : 0x6;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
- data << DPIO_FRC_LATENCY_SHFIT);
-
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@ -1618,6 +1624,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
@@ -1743,7 +1750,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
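hdmi_12bpc_possible() now reads encoders out of the atomic state rather than the legacy new_crtc pointers; the bandwidth check around it is unchanged. Because 12bpc moves 36 bits per pixel instead of 24, the TMDS clock scales by 3/2, which is what the clock_12bpc comparison guards. A worked example with assumed limits:

	#include <stdbool.h>
	#include <stdio.h>

	static bool fits_12bpc(int clock_8bpc_khz, int portclock_limit_khz)
	{
		/* 36 bpp vs 24 bpp: the TMDS clock scales by 3/2 */
		int clock_12bpc = clock_8bpc_khz * 3 / 2;

		return clock_12bpc <= portclock_limit_khz;
	}

	int main(void)
	{
		/* 1080p60 (148.5 MHz) against a 225 MHz single-link limit:
		 * 222.75 MHz fits, so 12bpc is viable there. */
		printf("%d\n", fits_12bpc(148500, 225000));
		return 0;
	}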
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e8d3da9f3373..fcb074bd55dc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -254,8 +254,10 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
return lrca >> 12;
}
-static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *ctx_obj)
{
+ struct drm_device *dev = ring->dev;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
@@ -272,6 +274,13 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
* signalling between Command Streamers */
/* desc |= GEN8_CTX_FORCE_RESTORE; */
+ /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+ if (IS_GEN9(dev) &&
+ INTEL_REVID(dev) <= SKL_REVID_B0 &&
+ (ring->id == BCS || ring->id == VCS ||
+ ring->id == VECS || ring->id == VCS2))
+ desc |= GEN8_CTX_FORCE_RESTORE;
+
return desc;
}
@@ -286,13 +295,13 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
- temp = execlists_ctx_descriptor(ctx_obj1);
+ temp = execlists_ctx_descriptor(ring, ctx_obj1);
else
temp = 0;
desc[1] = (u32)(temp >> 32);
desc[0] = (u32)temp;
- temp = execlists_ctx_descriptor(ctx_obj0);
+ temp = execlists_ctx_descriptor(ring, ctx_obj0);
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
@@ -612,7 +621,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
* @vmas: list of vmas.
* @batch_obj: the batchbuffer to submit.
* @exec_start: batchbuffer start virtual address pointer.
- * @flags: translated execbuffer call flags.
+ * @dispatch_flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
@@ -625,7 +634,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags)
+ u64 exec_start, u32 dispatch_flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@@ -698,10 +707,12 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
+ ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
if (ret)
return ret;
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -776,7 +787,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-/**
+/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @ringbuf: Logical Ringbuffer to advance.
*
@@ -785,9 +796,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request)
+static void
+intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -876,12 +888,9 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
return ret;
}
- /* Hold a reference to the context this request belongs to
- * (we will need it when the time comes to emit/retire the
- * request).
- */
request->ctx = ctx;
i915_gem_context_reference(request->ctx);
+ request->ringbuf = ctx->engine[ring->id].ringbuf;
ring->outstanding_lazy_request = request;
return 0;
@@ -1140,11 +1149,22 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
return init_workarounds_ring(ring);
}
+static int gen9_init_render_ring(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ ret = gen8_init_common_ring(ring);
+ if (ret)
+ return ret;
+
+ return init_workarounds_ring(ring);
+}
+
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
- u64 offset, unsigned flags)
+ u64 offset, unsigned dispatch_flags)
{
- bool ppgtt = !(flags & I915_DISPATCH_SECURE);
+ bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_logical_ring_begin(ringbuf, ctx, 4);
@@ -1316,6 +1336,39 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
return 0;
}
+static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct render_state so;
+ struct drm_i915_file_private *file_priv = ctx->file_priv;
+ struct drm_file *file = file_priv ? file_priv->file : NULL;
+ int ret;
+
+ ret = i915_gem_render_state_prepare(ring, &so);
+ if (ret)
+ return ret;
+
+ if (so.rodata == NULL)
+ return 0;
+
+ ret = ring->emit_bb_start(ringbuf,
+ ctx,
+ so.ggtt_offset,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+
+ ret = __i915_add_request(ring, file, so.obj);
+ /* intel_logical_ring_add_request moves object to inactive if it
+ * fails */
+out:
+ i915_gem_render_state_fini(&so);
+ return ret;
+}
+
static int gen8_init_rcs_context(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
@@ -1399,7 +1452,10 @@ static int logical_render_ring_init(struct drm_device *dev)
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- ring->init_hw = gen8_init_render_ring;
+ if (INTEL_INFO(dev)->gen >= 9)
+ ring->init_hw = gen9_init_render_ring;
+ else
+ ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
@@ -1581,37 +1637,47 @@ cleanup_render_ring:
return ret;
}
-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static u32
+make_rpcs(struct drm_device *dev)
{
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
- struct render_state so;
- struct drm_i915_file_private *file_priv = ctx->file_priv;
- struct drm_file *file = file_priv ? file_priv->file : NULL;
- int ret;
-
- ret = i915_gem_render_state_prepare(ring, &so);
- if (ret)
- return ret;
+ u32 rpcs = 0;
- if (so.rodata == NULL)
+ /*
+ * No explicit RPCS request is needed to ensure full
+ * slice/subslice/EU enablement prior to Gen9.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
return 0;
- ret = ring->emit_bb_start(ringbuf,
- ctx,
- so.ggtt_offset,
- I915_DISPATCH_SECURE);
- if (ret)
- goto out;
+ /*
+ * Starting in Gen9, render power gating can leave
+ * slice/subslice/EU in a partially enabled state. We
+ * must make an explicit request through RPCS for full
+ * enablement.
+ */
+ if (INTEL_INFO(dev)->has_slice_pg) {
+ rpcs |= GEN8_RPCS_S_CNT_ENABLE;
+ rpcs |= INTEL_INFO(dev)->slice_total <<
+ GEN8_RPCS_S_CNT_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+ if (INTEL_INFO(dev)->has_subslice_pg) {
+ rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
+ rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+ GEN8_RPCS_SS_CNT_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
- ret = __i915_add_request(ring, file, so.obj);
- /* intel_logical_ring_add_request moves object to inactive if it
- * fails */
-out:
- i915_gem_render_state_fini(&so);
- return ret;
+ if (INTEL_INFO(dev)->has_eu_pg) {
+ rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ GEN8_RPCS_EU_MIN_SHIFT;
+ rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ GEN8_RPCS_EU_MAX_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
+
+ return rpcs;
}
static int
@@ -1659,7 +1725,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
- _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1706,18 +1773,18 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
- reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
- reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
+ reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
+ reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
}
kunmap_atomic(reg_state);
@@ -1925,3 +1992,38 @@ error_unpin_ctx:
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
+
+void intel_lr_context_reset(struct drm_device *dev,
+ struct intel_context *ctx)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[ring->id].ringbuf;
+ uint32_t *reg_state;
+ struct page *page;
+
+ if (!ctx_obj)
+ continue;
+
+ if (i915_gem_object_get_pages(ctx_obj)) {
+ WARN(1, "Failed get_pages for context obj\n");
+ continue;
+ }
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ reg_state = kmap_atomic(page);
+
+ reg_state[CTX_RING_HEAD+1] = 0;
+ reg_state[CTX_RING_TAIL+1] = 0;
+
+ kunmap_atomic(reg_state);
+
+ ringbuf->head = 0;
+ ringbuf->tail = 0;
+ }
+}
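make_rpcs() above packs the slice/subslice/EU counts into the render power clock state value so gen9 power gating cannot leave a context with fewer resources enabled than it expects. A standalone model of the packing; the field positions here are placeholders, not the real GEN8_RPCS_* definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define RPCS_ENABLE		(1u << 31)	/* placeholder bits */
	#define RPCS_S_CNT_ENABLE	(1u << 18)
	#define RPCS_S_CNT_SHIFT	15
	#define RPCS_SS_CNT_ENABLE	(1u << 11)
	#define RPCS_SS_CNT_SHIFT	8
	#define RPCS_EU_MAX_SHIFT	4
	#define RPCS_EU_MIN_SHIFT	0

	static uint32_t make_rpcs_model(uint32_t slices, uint32_t subslices,
					uint32_t eus)
	{
		uint32_t rpcs = RPCS_ENABLE;

		rpcs |= RPCS_S_CNT_ENABLE | (slices << RPCS_S_CNT_SHIFT);
		rpcs |= RPCS_SS_CNT_ENABLE | (subslices << RPCS_SS_CNT_SHIFT);
		/* min == max: request full EU enablement */
		rpcs |= (eus << RPCS_EU_MIN_SHIFT) | (eus << RPCS_EU_MAX_SHIFT);
		return rpcs;
	}

	int main(void)
	{
		/* e.g. 3 slices, 4 subslices/slice, 8 EU/subslice */
		printf("0x%08x\n", make_rpcs_model(3, 4, 8));
		return 0;
	}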
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 6f2d7da594f6..adb731e49c57 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -30,6 +30,8 @@
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
@@ -40,10 +42,6 @@ int intel_logical_rings_init(struct drm_device *dev);
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx);
-void intel_logical_ring_advance_and_submit(
- struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -70,13 +68,13 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
int num_dwords);
/* Logical Ring Contexts */
-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx);
+void intel_lr_context_reset(struct drm_device *dev,
+ struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -86,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ u64 exec_start, u32 dispatch_flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
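The new CTX_CTRL_* names replace the bare (1 << 3) magic in populate_lr_context(). The register is a masked one: the upper 16 bits of a write select which of the lower 16 actually change, which is what the driver's _MASKED_BIT_ENABLE() encodes. A worked example with the two bits defined above:

	#include <stdint.h>
	#include <stdio.h>

	#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* select + set */

	#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH		(1 << 3)
	#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)

	int main(void)
	{
		uint32_t v = MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
					       CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

		/* 0x00090009: bits 0 and 3 are both unmasked and set, so
		 * the write cannot clobber the register's other fields. */
		printf("0x%08x\n", v);
		return 0;
	}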
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 071b96d6e146..5abda1d2c018 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -286,7 +286,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
/* Should never happen!! */
@@ -509,7 +509,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
intel_connector->panel.fitting_mode = value;
crtc = intel_attached_encoder(connector)->base.crtc;
- if (crtc && crtc->enabled) {
+ if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
@@ -535,6 +535,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -945,6 +946,12 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
+ if (intel_connector_init(&lvds_connector->base) < 0) {
+ kfree(lvds_connector);
+ kfree(lvds_encoder);
+ return;
+ }
+
lvds_encoder->attached_connector = lvds_connector;
intel_encoder = &lvds_encoder->base;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d8de1d5140a7..71e87abdcae7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -744,10 +744,8 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_didl_outputs(dev);
- intel_setup_cadls(dev);
- }
+ intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index f93dfc174495..dd92122ed95c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -720,7 +720,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+ &i915_ggtt_view_normal);
if (ret != 0)
return ret;
@@ -1065,7 +1066,6 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
- /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1261,7 +1261,6 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct overlay_registers __iomem *regs;
int ret;
- /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 24d77ddcc5f4..fa4ccb346389 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -56,24 +56,42 @@ static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * WaDisableSDEUnitClockGating:skl
- * This seems to be a pre-production w/a.
- */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
- GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+ /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+}
- /*
- * WaDisableDgMirrorFixInHalfSliceChicken5:skl
- * This is a pre-production w/a.
- */
- I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
- I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
- ~GEN9_DG_MIRROR_FIX_ENABLE);
+static void skl_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* Wa4x4STCOptimizationDisable:skl */
- I915_WRITE(CACHE_MODE_1,
- _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+ gen9_init_clock_gating(dev);
+
+ if (INTEL_REVID(dev) == SKL_REVID_A0) {
+ /*
+ * WaDisableSDEUnitClockGating:skl
+ * WaSetGAPSunitClckGateDisable:skl
+ */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /* WaDisableHDCInvalidation:skl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ I915_READ(FF_SLICE_CS_CHICKEN2) |
+ GEN9_TSG_BARRIER_ACK_DISABLE);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_E0)
+ /* WaDisableLSQCROPERFforOCL:skl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -245,6 +263,47 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if (enable)
+ val &= ~FORCE_DDR_HIGH_FREQ;
+ else
+ val |= FORCE_DDR_HIGH_FREQ;
+ val &= ~FORCE_DDR_LOW_FREQ;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ if (enable)
+ val |= DSP_MAXFIFO_PM5_ENABLE;
+ else
+ val &= ~DSP_MAXFIFO_PM5_ENABLE;
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+#define FW_WM(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
+
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
struct drm_device *dev = dev_priv->dev;
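The FW_WM() helper introduced above centralizes the shift-and-mask that the watermark writes repeat for every plane field, and it masks after shifting, so an over-range value clips inside its own field instead of spilling into a neighbour. A sketch with assumed field geometry:

	#define DSPFW_SR_SHIFT	23		/* assumed position */
	#define DSPFW_SR_MASK	(0x1ff << 23)	/* assumed 9-bit field */

	#define FW_WM(value, plane) \
		(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

	/* FW_WM(0x250, SR) = (0x250 << 23) & DSPFW_SR_MASK: the bit of
	 * 0x250 above the 9-bit field is clipped by the mask rather than
	 * corrupting the adjacent fields packed into the same register. */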
@@ -252,6 +311,8 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ if (IS_CHERRYVIEW(dev))
+ chv_set_memory_pm5(dev_priv, enable);
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
} else if (IS_PINEVIEW(dev)) {
@@ -274,6 +335,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
enable ? "enabled" : "disabled");
}
+
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -290,6 +352,61 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
*/
static const int pessimal_latency_ns = 5000;
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
+
+static int vlv_get_fifo_size(struct drm_device *dev,
+ enum pipe pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int sprite0_start, sprite1_start, size;
+
+ switch (pipe) {
+ uint32_t dsparb, dsparb2, dsparb3;
+ case PIPE_A:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+ break;
+ case PIPE_B:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+ break;
+ case PIPE_C:
+ dsparb2 = I915_READ(DSPARB2);
+ dsparb3 = I915_READ(DSPARB3);
+ sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+ sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+ break;
+ default:
+ return 0;
+ }
+
+ switch (plane) {
+ case 0:
+ size = sprite0_start;
+ break;
+ case 1:
+ size = sprite1_start - sprite0_start;
+ break;
+ case 2:
+ size = 512 - 1 - sprite1_start;
+ break;
+ default:
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
+ pipe_name(pipe), plane == 0 ? "primary" : "sprite",
+ plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
+ size);
+
+ return size;
+}
+
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
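vlv_get_fifo_size() decodes the runtime FIFO split: each plane's start offset is nine bits, eight low bits from DSPARB plus a ninth from DSPARB2, across the 512-entry FIFO that a pipe's planes share. A worked example for pipe A with hypothetical register values:

	#include <stdio.h>

	#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
		((((dsparb) >> (lo_shift)) & 0xff) | \
		 ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

	int main(void)
	{
		unsigned int dsparb = 0x0000e0c0, dsparb2 = 0; /* hypothetical */
		int s0 = VLV_FIFO_START(dsparb, dsparb2, 0, 0); /* 192 */
		int s1 = VLV_FIFO_START(dsparb, dsparb2, 8, 4); /* 224 */

		/* primary: 0..s0-1, sprite0: s0..s1-1, sprite1: s1..510 */
		printf("primary=%d sprite0=%d sprite1=%d\n",
		       s0, s1 - s0, 512 - 1 - s1);
		return 0;
	}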
@@ -535,7 +652,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int clock;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
@@ -547,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
+ reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
@@ -557,7 +674,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ reg |= FW_WM(wm, CURSOR_SR);
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
@@ -566,7 +683,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
+ reg |= FW_WM(wm, HPLL_SR);
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
@@ -575,7 +692,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
@@ -611,7 +728,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -626,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
+ entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -698,7 +815,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -712,7 +829,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
+ entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
@@ -721,232 +838,234 @@ static bool g4x_compute_srwm(struct drm_device *dev,
display, cursor);
}
-static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
- int pixel_size,
- int *prec_mult,
- int *drain_latency)
-{
- struct drm_device *dev = crtc->dev;
- int entries;
- int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
+#define FW_WM_VLV(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
- if (WARN(clock == 0, "Pixel clock is zero!\n"))
- return false;
+static void vlv_write_wm_values(struct intel_crtc *crtc,
+ const struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
- return false;
+ I915_WRITE(VLV_DDL(pipe),
+ (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (IS_CHERRYVIEW(dev))
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
- DRAIN_LATENCY_PRECISION_16;
- else
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
- DRAIN_LATENCY_PRECISION_32;
- *drain_latency = (64 * (*prec_mult) * 4) / entries;
+ I915_WRITE(DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
+ I915_WRITE(DSPFW2,
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
+ I915_WRITE(DSPFW3,
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ I915_WRITE(DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
+ I915_WRITE(DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ } else {
+ I915_WRITE(DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ }
- if (*drain_latency > DRAIN_LATENCY_MASK)
- *drain_latency = DRAIN_LATENCY_MASK;
+ POSTING_READ(DSPFW1);
- return true;
+ dev_priv->wm.vlv = *wm;
}
-/*
- * Update drain latency registers of memory arbiter
- *
- * Valleyview SoC has a new memory arbiter and needs drain latency registers
- * to be programmed. Each plane has a drain latency multiplier and a drain
- * latency value.
- */
+#undef FW_WM_VLV
-static void vlv_update_drain_latency(struct drm_crtc *crtc)
+static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
+ struct drm_plane *plane)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pixel_size;
- int drain_latency;
- enum pipe pipe = intel_crtc->pipe;
- int plane_prec, prec_mult, plane_dl;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ int entries, prec_mult, drain_latency, pixel_size;
+ int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
+ const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
- plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
- DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
- (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
+ /*
+ * FIXME the plane might have an fb
+	 * but be invisible (e.g. due to clipping)
+ */
+ if (!intel_crtc->active || !plane->state->fb)
+ return 0;
- if (!intel_crtc_active(crtc)) {
- I915_WRITE(VLV_DDL(pipe), plane_dl);
- return;
- }
+ if (WARN(clock == 0, "Pixel clock is zero!\n"))
+ return 0;
- /* Primary plane Drain Latency */
- pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
- if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_PLANE_PRECISION_HIGH :
- DDL_PLANE_PRECISION_LOW;
- plane_dl |= plane_prec | drain_latency;
- }
+ pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
- /* Cursor Drain Latency
- * BPP is always 4 for cursor
- */
- pixel_size = 4;
+ if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
+ return 0;
- /* Program cursor DL only if it is enabled */
- if (intel_crtc->cursor_base &&
- vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_CURSOR_PRECISION_HIGH :
- DDL_CURSOR_PRECISION_LOW;
- plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+
+ prec_mult = high_precision;
+ drain_latency = 64 * prec_mult * 4 / entries;
+
+ if (drain_latency > DRAIN_LATENCY_MASK) {
+ prec_mult /= 2;
+ drain_latency = 64 * prec_mult * 4 / entries;
}
- I915_WRITE(VLV_DDL(pipe), plane_dl);
-}
+ if (drain_latency > DRAIN_LATENCY_MASK)
+ drain_latency = DRAIN_LATENCY_MASK;
-#define single_plane_enabled(mask) is_power_of_2(mask)
+ return drain_latency | (prec_mult == high_precision ?
+ DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
+}
-static void valleyview_update_wm(struct drm_crtc *crtc)
+static int vlv_compute_wm(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ int fifo_size)
{
- struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
- bool cxsr_enabled;
+ int clock, entries, pixel_size;
- vlv_update_drain_latency(crtc);
+ /*
+ * FIXME the plane might have an fb
+	 * but be invisible (e.g. due to clipping)
+ */
+ if (!crtc->active || !plane->base.state->fb)
+ return 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+ clock = crtc->config->base.adjusted_mode.crtc_clock;
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
+ /*
+ * Set up the watermark such that we don't start issuing memory
+ * requests until we are within PND's max deadline value (256us).
+	 * The idea is to stay idle as long as possible while still taking
+	 * advantage of PND's deadline scheduling. The limit of 8
+	 * cachelines (used when the FIFO will anyway drain in less time
+	 * than 256us) should match what would be done if trickle
+	 * feed were enabled.
+ */
+ return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+}
+
+static bool vlv_compute_sr_wm(struct drm_device *dev,
+ struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_crtc *crtc;
+ enum pipe pipe = INVALID_PIPE;
+ int num_planes = 0;
+ int fifo_size = 0;
+ struct intel_plane *plane;
+
+ wm->sr.cursor = wm->sr.plane = 0;
+
+ crtc = single_enabled_crtc(dev);
+ /* maxfifo not supported on pipe C */
+ if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
+ pipe = to_intel_crtc(crtc)->pipe;
+ num_planes = !!wm->pipe[pipe].primary +
+ !!wm->pipe[pipe].sprite[0] +
+ !!wm->pipe[pipe].sprite[1];
+ fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
+ if (fifo_size == 0 || num_planes > 1)
+ return false;
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
+ to_intel_plane(crtc->cursor), 0x3f);
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (plane->pipe != pipe)
+ continue;
+
+ wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
+ plane, fifo_size);
+ if (wm->sr.plane != 0)
+ break;
+ }
+
+ return true;
}
-static void cherryview_update_wm(struct drm_crtc *crtc)
+static void valleyview_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, planec_wm;
- int cursora_wm, cursorb_wm, cursorc_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- vlv_update_drain_latency(crtc);
+ wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
+ wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->primary),
+ vlv_get_fifo_size(dev, pipe, 0));
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
+ wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->cursor),
+ 0x3f);
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
- if (g4x_compute_wm0(dev, PIPE_C,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planec_wm, &cursorc_wm))
- enabled |= 1 << PIPE_C;
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
- }
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+ wm.sr.plane, wm.sr.cursor);
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
- "SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- planec_wm, cursorc_wm,
- plane_sr, cursor_sr);
+ /*
+ * FIXME DDR DVFS introduces massive memory latencies which
+	 * are not known to the system agent, so any deadline specified
+ * by the display may not be respected. To support DDR DVFS
+ * the watermark code needs to be rewritten to essentially
+ * bypass deadline mechanism and rely solely on the
+ * watermarks. For now disable DDR DVFS.
+ */
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_set_memory_dvfs(dev_priv, false);
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
- I915_WRITE(DSPFW9_CHV,
- (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
- DSPFW_CURSORC_MASK)) |
- (planec_wm << DSPFW_PLANEC_SHIFT) |
- (cursorc_wm << DSPFW_CURSORC_SHIFT));
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
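The watermark formula in vlv_compute_wm() keys off bandwidth: entries is the bytes a plane fetches per microsecond, and the plane may stay idle until the remaining FIFO would drain within the PND's 256 us deadline, subject to the 8-cacheline floor. Two worked cases with hypothetical modes:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	static int clamp_int(int v, int lo, int hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	static int vlv_wm_model(int clock_khz, int cpp, int fifo_size)
	{
		int entries = DIV_ROUND_UP(clock_khz, 1000) * cpp; /* B/us */

		return fifo_size - clamp_int(DIV_ROUND_UP(256 * entries, 64),
					     0, fifo_size - 8);
	}

	int main(void)
	{
		/* 1080p at 4 Bpp drains 596 B/us: 256 us of data is 2384
		 * cachelines, far beyond the FIFO, so the 8-entry floor
		 * applies and the watermark is 8. */
		printf("%d\n", vlv_wm_model(148500, 4, 511));

		/* VGA at 2 Bpp: 52 B/us -> 208 cachelines cover 256 us,
		 * yielding a watermark of 303. */
		printf("%d\n", vlv_wm_model(25175, 2, 511));
		return 0;
	}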
@@ -961,30 +1080,47 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_plane(plane)->pipe;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
int sprite = to_intel_plane(plane)->plane;
- int drain_latency;
- int plane_prec;
- int sprite_dl;
- int prec_mult;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
- (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
+ if (enabled) {
+ wm.ddl[pipe].sprite[sprite] =
+ vlv_compute_drain_latency(crtc, plane);
- if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
- &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_SPRITE_PRECISION_HIGH(sprite) :
- DDL_SPRITE_PRECISION_LOW(sprite);
- sprite_dl |= plane_prec |
- (drain_latency << DDL_SPRITE_SHIFT(sprite));
+ wm.pipe[pipe].sprite[sprite] =
+ vlv_compute_wm(intel_crtc,
+ to_intel_plane(plane),
+ vlv_get_fifo_size(dev, pipe, sprite+1));
+ } else {
+ wm.ddl[pipe].sprite[sprite] = 0;
+ wm.pipe[pipe].sprite[sprite] = 0;
}
- I915_WRITE(VLV_DDL(pipe), sprite_dl);
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ sprite_name(pipe, sprite),
+ wm.pipe[pipe].sprite[sprite],
+ wm.sr.plane, wm.sr.cursor);
+
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1027,17 +1163,17 @@ static void g4x_update_wm(struct drm_crtc *crtc)
plane_sr, cursor_sr);
I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
+ FW_WM(plane_sr, SR) |
+ FW_WM(cursorb_wm, CURSORB) |
+ FW_WM(planeb_wm, PLANEB) |
+ FW_WM(planea_wm, PLANEA));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
+ FW_WM(cursora_wm, CURSORA));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -1062,7 +1198,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1080,7 +1216,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * to_intel_crtc(crtc)->cursor_width;
+ pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1103,19 +1239,21 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << DSPFW_CURSORB_SHIFT) |
- (8 << DSPFW_PLANEB_SHIFT) |
- (8 << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
- (8 << DSPFW_PLANEC_SHIFT_OLD));
+ I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
+#undef FW_WM
+
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
@@ -1139,7 +1277,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1161,7 +1299,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1184,7 +1322,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I915GM(dev) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->state->fb);
/* self-refresh seems busted with untiled */
if (obj->tiling_mode == I915_TILING_NONE)
@@ -1208,7 +1346,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
+ int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1645,7 +1783,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return 0;
/* The WM are computed based on how long it takes to fill a single
@@ -1711,6 +1849,8 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
GEN9_MEM_LATENCY_LEVEL_MASK;
/*
+ * WaWmMemoryReadLatency:skl
+ *
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from
* the punit.
@@ -1898,19 +2038,31 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
p->active = true;
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
- p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
- p->cur.bytes_per_pixel = 4;
+
+ if (crtc->primary->state->fb) {
+ p->pri.enabled = true;
+ p->pri.bytes_per_pixel =
+ crtc->primary->state->fb->bits_per_pixel / 8;
+ } else {
+ p->pri.enabled = false;
+ p->pri.bytes_per_pixel = 0;
+ }
+
+ if (crtc->cursor->state->fb) {
+ p->cur.enabled = true;
+ p->cur.bytes_per_pixel = 4;
+ } else {
+ p->cur.enabled = false;
+ p->cur.bytes_per_pixel = 0;
+ }
p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
- p->cur.horiz_pixels = intel_crtc->cursor_width;
- /* TODO: for now, assume primary and cursor planes are always enabled. */
- p->pri.enabled = true;
- p->cur.enabled = true;
+ p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2410,7 +2562,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
nth_active_pipe = 0;
for_each_crtc(dev, crtc) {
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
continue;
if (crtc == for_crtc)
@@ -2443,13 +2595,12 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
int plane;
u32 val;
for_each_pipe(dev_priv, pipe) {
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -2498,10 +2649,12 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
+ uint16_t minimum[I915_MAX_PLANES];
unsigned int total_data_rate;
int plane;
@@ -2520,9 +2673,21 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
alloc_size -= cursor_blocks;
alloc->end -= cursor_blocks;
+ /* 1. Allocate the minimum required blocks for each active plane */
+ for_each_plane(dev_priv, pipe, plane) {
+ const struct intel_plane_wm_parameters *p;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ minimum[plane] = 8;
+ alloc_size -= minimum[plane];
+ }
+
/*
- * Each active plane get a portion of the remaining space, in
- * proportion to the amount of data they need to fetch from memory.
+ * 2. Distribute the remaining space in proportion to the amount of
+ * data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
@@ -2544,8 +2709,9 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
* promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1
*/
- plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
- total_data_rate);
+ plane_blocks = minimum[plane];
+ plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
+ total_data_rate);
ddb->plane[pipe][plane].start = start;
ddb->plane[pipe][plane].end = start + plane_blocks;
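Worked through with hypothetical numbers (none of these figures appear in the patch), the two-step allocation behaves like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t alloc_size = 500, minimum = 8;         /* hypothetical */
        uint64_t rate[2] = { 300, 100 }, total = 400;
        int i;

        /* step 1: reserve the per-plane minimum -> 484 blocks remain */
        alloc_size -= 2 * minimum;

        /* step 2: split the remainder in proportion to data rate */
        for (i = 0; i < 2; i++)
                printf("plane %d: %llu blocks\n", i,
                       (unsigned long long)(minimum +
                                            alloc_size * rate[i] / total));
        /* prints 371 and 129, which sum back to the original 500 */
        return 0;
}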
@@ -2575,7 +2741,7 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
if (latency == 0)
return UINT_MAX;
- wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
+ wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
return ret;
@@ -2583,17 +2749,29 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
- uint32_t latency)
+ uint64_t tiling, uint32_t latency)
{
- uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
+ uint32_t ret;
+ uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint32_t wm_intermediate_val;
if (latency == 0)
return UINT_MAX;
plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+
+ if (tiling == I915_FORMAT_MOD_Y_TILED ||
+ tiling == I915_FORMAT_MOD_Yf_TILED) {
+ plane_bytes_per_line *= 4;
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ plane_blocks_per_line /= 4;
+ } else {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ }
+
wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
- plane_bytes_per_line;
+ plane_blocks_per_line;
return ret;
}
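As a sanity check on the Y-tile branch above: rounding up over a four-line span and then dividing back down can save a block compared with per-line rounding. A standalone sketch, not driver code, using a hypothetical 1366-pixel, 4-byte-per-pixel plane and the 512-byte block size from the diff:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t blocks_per_line(uint32_t width, uint32_t cpp, int y_tiled)
{
        uint32_t bytes = width * cpp;   /* 1366 * 4 = 5464 bytes/line */

        if (y_tiled)    /* round over 4 lines, then scale back down */
                return DIV_ROUND_UP(bytes * 4, 512) / 4;
        return DIV_ROUND_UP(bytes, 512);
}

int main(void)
{
        printf("linear: %u, y-tiled: %u\n",
               blocks_per_line(1366, 4, 0),   /* ceil(5464/512)      = 11 */
               blocks_per_line(1366, 4, 1));  /* ceil(21856/512) / 4 = 10 */
        return 0;
}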
@@ -2624,7 +2802,7 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev,
struct drm_plane *plane;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- config->num_pipes_active += intel_crtc_active(crtc);
+ config->num_pipes_active += to_intel_crtc(crtc)->active;
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2642,26 +2820,40 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
+ struct drm_framebuffer *fb;
int i = 1; /* index at which sprite planes start */
- p->active = intel_crtc_active(crtc);
+ p->active = intel_crtc->active;
if (p->active) {
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
- /*
- * For now, assume primary and cursor planes are always enabled.
- */
- p->plane[0].enabled = true;
- p->plane[0].bytes_per_pixel =
- crtc->primary->fb->bits_per_pixel / 8;
+ fb = crtc->primary->state->fb;
+ if (fb) {
+ p->plane[0].enabled = true;
+ p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->plane[0].tiling = fb->modifier[0];
+ } else {
+ p->plane[0].enabled = false;
+ p->plane[0].bytes_per_pixel = 0;
+ p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
+ }
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
-
- p->cursor.enabled = true;
- p->cursor.bytes_per_pixel = 4;
- p->cursor.horiz_pixels = intel_crtc->cursor_width ?
- intel_crtc->cursor_width : 64;
+ p->plane[0].rotation = crtc->primary->state->rotation;
+
+ fb = crtc->cursor->state->fb;
+ if (fb) {
+ p->cursor.enabled = true;
+ p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
+ p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
+ } else {
+ p->cursor.enabled = false;
+ p->cursor.bytes_per_pixel = 0;
+ p->cursor.horiz_pixels = 64;
+ p->cursor.vert_pixels = 64;
+ }
}
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2673,41 +2865,74 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
}
}
-static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
+static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params,
uint16_t ddb_allocation,
- uint32_t mem_value,
+ int level,
uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */)
{
- uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
- uint32_t result_bytes;
+ uint32_t latency = dev_priv->wm.skl_latency[level];
+ uint32_t method1, method2;
+ uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint32_t res_blocks, res_lines;
+ uint32_t selected_result;
- if (mem_value == 0 || !p->active || !p_params->enabled)
+ if (latency == 0 || !p->active || !p_params->enabled)
return false;
method1 = skl_wm_method1(p->pixel_rate,
p_params->bytes_per_pixel,
- mem_value);
+ latency);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
p_params->bytes_per_pixel,
- mem_value);
+ p_params->tiling,
+ latency);
plane_bytes_per_line = p_params->horiz_pixels *
p_params->bytes_per_pixel;
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+
+ if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+ p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
+ uint32_t min_scanlines = 4;
+ uint32_t y_tile_minimum;
+ if (intel_rotation_90_or_270(p_params->rotation)) {
+ switch (p_params->bytes_per_pixel) {
+ case 1:
+ min_scanlines = 16;
+ break;
+ case 2:
+ min_scanlines = 8;
+ break;
+ case 8:
+ WARN(1, "Unsupported pixel depth for rotation");
+ }
+ }
+ y_tile_minimum = plane_blocks_per_line * min_scanlines;
+ selected_result = max(method2, y_tile_minimum);
+ } else {
+ if ((ddb_allocation / plane_blocks_per_line) >= 1)
+ selected_result = min(method1, method2);
+ else
+ selected_result = method1;
+ }
- /* For now xtile and linear */
- if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
- result_bytes = min(method1, method2);
- else
- result_bytes = method1;
+ res_blocks = selected_result + 1;
+ res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
- res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
- res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
+ if (level >= 1 && level <= 7) {
+ if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+ p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
+ res_lines += 4;
+ else
+ res_blocks++;
+ }
- if (res_blocks > ddb_allocation || res_lines > 31)
+ if (res_blocks >= ddb_allocation || res_lines > 31)
return false;
*out_blocks = res_blocks;
@@ -2724,30 +2949,31 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
int num_planes,
struct skl_wm_level *result)
{
- uint16_t latency = dev_priv->wm.skl_latency[level];
uint16_t ddb_blocks;
int i;
for (i = 0; i < num_planes; i++) {
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
+ result->plane_en[i] = skl_compute_plane_wm(dev_priv,
+ p, &p->plane[i],
ddb_blocks,
- latency,
+ level,
&result->plane_res_b[i],
&result->plane_res_l[i]);
}
ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
- result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
- latency, &result->cursor_res_b,
+ result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
+ ddb_blocks, level,
+ &result->cursor_res_b,
&result->cursor_res_l);
}
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
return 0;
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
@@ -2921,12 +3147,11 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
- struct drm_device *dev = dev_priv->dev;
int plane;
DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
I915_WRITE(PLANE_SURF(pipe, plane),
I915_READ(PLANE_SURF(pipe, plane)));
}
@@ -3133,12 +3358,21 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
int pixel_size, bool enabled, bool scaled)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane->state->fb;
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
+ intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
+ /*
+ * Framebuffer can be NULL on plane disable, but it does not
+ * matter for watermarks if we assume no tiling in that case.
+ */
+ if (fb)
+ intel_plane->wm.tiling = fb->modifier[0];
+ intel_plane->wm.rotation = plane->state->rotation;
skl_update_wm(crtc);
}
@@ -3287,7 +3521,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
hw->dirty[pipe] = true;
@@ -3342,7 +3576,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
- active->pipe_enabled = intel_crtc_active(crtc);
+ active->pipe_enabled = intel_crtc->active;
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
@@ -3456,41 +3690,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
pixel_size, enabled, scaled);
}
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_ggtt_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- return NULL;
-}
-
/**
* Lock protecting IPS related data structures
*/
@@ -3623,7 +3822,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
*/
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
@@ -3633,9 +3832,15 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- limits = dev_priv->rps.max_freq_softlimit << 24;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= dev_priv->rps.min_freq_softlimit << 16;
+ if (IS_GEN9(dev_priv->dev)) {
+ limits = (dev_priv->rps.max_freq_softlimit) << 23;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+ } else {
+ limits = dev_priv->rps.max_freq_softlimit << 24;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= dev_priv->rps.min_freq_softlimit << 16;
+ }
return limits;
}
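The only difference between the two packings above is the field position. A sketch for illustration; unlike the function, it ORs in the minimum unconditionally:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_limits(uint8_t max, uint8_t min, int is_gen9)
{
        if (is_gen9)
                return ((uint32_t)max << 23) | ((uint32_t)min << 14);
        return ((uint32_t)max << 24) | ((uint32_t)min << 16);
}

int main(void)
{
        /* hypothetical softlimits: max 0x16, min 0x0b */
        printf("gen6-8: %08x\n", pack_limits(0x16, 0x0b, 0)); /* 160b0000 */
        printf("gen9:   %08x\n", pack_limits(0x16, 0x0b, 1)); /* 0b02c000 */
        return 0;
}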
@@ -3643,6 +3848,8 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
int new_power;
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
@@ -3664,9 +3871,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
/* Max/min bins are special */
- if (val == dev_priv->rps.min_freq_softlimit)
+ if (val <= dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
- if (val == dev_priv->rps.max_freq_softlimit)
+ if (val >= dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
@@ -3675,59 +3882,53 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
- I915_WRITE(GEN6_RP_UP_EI, 12500);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+ ei_up = 16000;
+ threshold_up = 95;
/* Downclock if less than 85% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 85;
break;
case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
- I915_WRITE(GEN6_RP_UP_EI, 10250);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+ ei_up = 13000;
+ threshold_up = 90;
/* Downclock if less than 75% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 75;
break;
case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
- I915_WRITE(GEN6_RP_UP_EI, 8000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+ ei_up = 10000;
+ threshold_up = 85;
/* Downclock if less than 60% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 60;
break;
}
+ I915_WRITE(GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_up));
+ I915_WRITE(GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+
+ I915_WRITE(GEN6_RP_DOWN_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_down));
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
dev_priv->rps.power = new_power;
dev_priv->rps.last_adj = 0;
}
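The thresholds are now expressed as an evaluation interval in microseconds plus a busy percentage, converted to hardware ticks by GT_INTERVAL_FROM_US. Assuming the pre-gen9 conversion is us * 100 >> 7, i.e. a 1.28 us tick (an assumption; the real macro is defined elsewhere in i915), the LOW_POWER case lands on the same 12500-tick interval the old hardcoded write used:

#include <stdint.h>
#include <stdio.h>

/* assumed gen6-8 conversion; the driver macro also has a gen9 variant */
static uint32_t interval_from_us(uint32_t us)
{
        return us * 100 >> 7;           /* 1.28 us per hardware tick */
}

int main(void)
{
        uint32_t ei_up = 16000, threshold_up = 95;      /* LOW_POWER */

        printf("RP_UP_EI        = %u\n", interval_from_us(ei_up));
        printf("RP_UP_THRESHOLD = %u\n",
               interval_from_us(ei_up * threshold_up / 100));
        /* 12500 and 11875; the old hardcoded pair was 12500/11800 */
        return 0;
}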
@@ -3737,11 +3938,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
u32 mask = 0;
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
@@ -3750,13 +3950,13 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
/* min/max delay may still have been modified so be sure to
* write the limits value.
@@ -3764,7 +3964,10 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN9_FREQUENCY(val));
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -3777,7 +3980,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
@@ -3786,6 +3989,27 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(val * 50);
}
+static void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
+
+ if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+ "Odd GPU freq value\n"))
+ val &= ~1;
+
+ if (val != dev_priv->rps.cur_freq)
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+ dev_priv->rps.cur_freq = val;
+ trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
*
* * If Gfx is Idle, then
@@ -3798,10 +4022,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ u32 val = dev_priv->rps.idle_freq;
/* CHV and latest VLV don't need to force the gfx clock */
if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ valleyview_set_rps(dev_priv->dev, val);
return;
}
@@ -3809,7 +4034,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* When we are idle. Drop to min voltage state.
*/
- if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+ if (dev_priv->rps.cur_freq <= val)
return;
/* Mask turbo interrupt so that they will not come in between */
@@ -3818,10 +4043,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, true);
- dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
+ dev_priv->rps.cur_freq = val;
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
- dev_priv->rps.min_freq_softlimit);
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 100))
@@ -3829,8 +4053,19 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, false);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+}
+
+void gen6_rps_busy(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ gen6_rps_reset_ei(dev_priv);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3842,46 +4077,34 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ u32 val;
mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
- else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+ val = dev_priv->rps.max_freq_softlimit;
+ if (dev_priv->rps.enabled &&
+ dev_priv->mm.busy &&
+ dev_priv->rps.cur_freq < val) {
+ intel_set_rps(dev_priv->dev, val);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
-void valleyview_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_device *dev, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
- if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
- "Odd GPU freq value\n"))
- val &= ~1;
-
- if (val != dev_priv->rps.cur_freq)
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- dev_priv->rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
}
static void gen9_disable_rps(struct drm_device *dev)
@@ -3995,6 +4218,13 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ if (IS_SKYLAKE(dev)) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ * the natural hardware unit for SKL */
+ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ }
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
@@ -4011,6 +4241,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4035,23 +4267,21 @@ static void gen9_enable_rps(struct drm_device *dev)
gen6_init_rps_frequencies(dev);
- I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
- I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
+ /* Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+
+ /* 1 second timeout */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(dev_priv, 1000000));
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
- I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
- I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
- I915_WRITE(GEN6_PMINTRMSK, 0x6);
- I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
- gen6_enable_rps_interrupts(dev);
+ /* Leaning on the below call to gen6_set_rps to program/setup the
+ * Up/Down EI & threshold registers, as well as the RP_CONTROL,
+ * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4179,7 +4409,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4273,7 +4503,7 @@ static void gen6_enable_rps(struct drm_device *dev)
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -4638,6 +4868,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4713,6 +4945,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4904,124 +5138,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
- drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
- dev_priv->ips.renderctx = NULL;
- }
-
- if (dev_priv->ips.pwrctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
- drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
- dev_priv->ips.pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx == NULL)
- dev_priv->ips.renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.renderctx)
- return -ENOMEM;
-
- if (dev_priv->ips.pwrctx == NULL)
- dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- bool was_interruptible;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = ironlake_setup_rc6(dev);
- if (ret)
- return;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = intel_ring_begin(ring, 6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- dev_priv->mm.interruptible = was_interruptible;
- return;
- }
-
- intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- intel_ring_emit(ring, MI_SUSPEND_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_advance(ring);
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush, combined with MI_FLUSH above, it should be
- * safe to assume that renderctx is valid
- */
- ret = intel_ring_idle(ring);
- dev_priv->mm.interruptible = was_interruptible;
- if (ret) {
- DRM_ERROR("failed to enable ironlake power savings\n");
- ironlake_teardown_rc6(dev);
- return;
- }
-
- I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-
- intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
-}
-
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5534,12 +5650,7 @@ static void gen6_suspend_rps(struct drm_device *dev)
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_disable_rps_interrupts(dev);
}
/**
@@ -5569,7 +5680,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
- ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6) {
intel_suspend_gt_powersave(dev);
@@ -5597,12 +5707,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- /*
- * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
- * added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_reset_rps_interrupts(dev);
+ gen6_reset_rps_interrupts(dev);
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
@@ -5619,10 +5724,16 @@ static void intel_gen6_powersave_work(struct work_struct *work)
gen6_enable_rps(dev);
__gen6_update_ring_freq(dev);
}
+
+ WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+
+ WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+
dev_priv->rps.enabled = true;
- if (INTEL_INFO(dev)->gen < 9)
- gen6_enable_rps_interrupts(dev);
+ gen6_enable_rps_interrupts(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5633,10 +5744,13 @@ void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(dev))
+ return;
+
if (IS_IRONLAKE_M(dev)) {
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
- ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
@@ -6169,11 +6283,22 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen6_check_mch_setup(dev);
}
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+}
+
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -6221,8 +6346,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization isn't listed for VLV.
@@ -6259,9 +6382,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -6396,7 +6517,8 @@ void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->display.init_clock_gating(dev);
+ if (dev_priv->display.init_clock_gating)
+ dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
@@ -6422,7 +6544,7 @@ void intel_init_pm(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
- dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+ dev_priv->display.init_clock_gating = skl_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -6450,7 +6572,7 @@ void intel_init_pm(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
- dev_priv->display.update_wm = cherryview_update_wm;
+ dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
@@ -6618,7 +6740,9 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_gpu_freq(dev_priv, val);
@@ -6628,7 +6752,9 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_freq_opcode(dev_priv, val);
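Assuming GT_FREQUENCY_MULTIPLIER is 50 and GEN9_FREQ_SCALER is 3 (values not shown in this diff, but consistent with the "16.66 MHz units" comment in gen6_init_rps_frequencies above), the gen9 conversion round-trips like so:

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50      /* assumed, from i915 headers */
#define GEN9_FREQ_SCALER        3

int main(void)
{
        int val = 27;   /* hypothetical punit frequency code */
        int mhz = val * GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER;

        printf("%d -> %d MHz\n", val, mhz);     /* 27 -> 450 MHz */
        printf("%d MHz -> %d\n", mhz,
               mhz * GEN9_FREQ_SCALER / GT_FREQUENCY_MULTIPLIER);
        return 0;
}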
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b9f40c2e0af7..a8f9348259ae 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -532,8 +532,6 @@ static void intel_psr_exit(struct drm_device *dev)
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
- dev_priv->psr.active = false;
} else {
val = I915_READ(VLV_PSRCTL(pipe));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b3c6dbd467..441e2502b889 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
return 0;
}
-static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
-{
- int ret;
-
- if (!ring->fbc_dirty)
- return 0;
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
- /* WaFbcNukeOn3DBlt:ivb/hsw */
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, value);
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
- intel_ring_advance(ring);
-
- ring->fbc_dirty = false;
- return 0;
-}
-
static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
return 0;
}
@@ -458,14 +432,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret;
}
- ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
- if (ret)
- return ret;
-
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
- return 0;
+ return gen8_emit_pipe_control(ring, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -502,6 +469,68 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
I915_WRITE(HWS_PGA, addr);
}
+static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ /*
+ * VCS2 doesn't actually exist on Gen7. This case only quiets
+ * the gcc switch check warning.
+ */
+ case VCS2:
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ /* XXX: gen8 returns to sanity */
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+
+ /*
+ * Flush the TLB for this page
+ *
+ * FIXME: These two bits have disappeared on gen8, so a question
+ * arises: do we still need this and if so how should we go about
+ * invalidating the TLB?
+ */
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+
+ /* ring should be idle before issuing a sync flush */
+ WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
+}
+
static bool stop_ring(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
@@ -788,12 +817,14 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
- /* WaForceEnableNonCoherent:bdw */
- /* WaHdcDisableFetchWhenMasked:bdw */
- /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ /* WaForceEnableNonCoherent:bdw */
HDC_FORCE_NON_COHERENT |
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaHdcDisableFetchWhenMasked:bdw */
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
@@ -870,9 +901,132 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
+ if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+ INTEL_REVID(dev) == SKL_REVID_D0)
+ /* WaBarrierPerformanceFixDisable:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE |
+ HDC_BARRIER_PERFORMANCE_DISABLE);
+
return 0;
}
+static int gen9_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* WaDisablePartialInstShootdown:skl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Syncing dependencies between camera and graphics */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+ if (INTEL_REVID(dev) == SKL_REVID_A0 ||
+ INTEL_REVID(dev) == SKL_REVID_B0) {
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_DG_MIRROR_FIX_ENABLE);
+ }
+
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
+ WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN9_RHWO_OPTIMIZATION_DISABLE);
+ WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
+ DISABLE_PIXEL_MASK_CAMMING);
+ }
+
+ if (INTEL_REVID(dev) >= SKL_REVID_C0) {
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /*
+ *Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+ }
+
+ /* Wa4x4STCOptimizationDisable:skl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /* WaDisablePartialResolveInVc:skl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+ /* WaCcsTlbPrefetchDisable:skl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+ return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 vals[3] = { 0, 0, 0 };
+ unsigned int i;
+
+ for (i = 0; i < 3; i++) {
+ u8 ss;
+
+ /*
+ * Only consider slices where one, and only one, subslice has 7
+ * EUs
+ */
+ if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
+ continue;
+
+ /*
+ * subslice_7eu[i] != 0 (because of the check above) and
+ * ss_max == 4 (maximum number of subslices possible per slice)
+ *
+ * -> 0 <= ss <= 3;
+ */
+ ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+ vals[i] = 3 - ss;
+ }
+
+ if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+ return 0;
+
+ /* Tune IZ hashing. See intel_device_info_runtime_init() */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN9_IZ_HASHING_MASK(2) |
+ GEN9_IZ_HASHING_MASK(1) |
+ GEN9_IZ_HASHING_MASK(0),
+ GEN9_IZ_HASHING(2, vals[2]) |
+ GEN9_IZ_HASHING(1, vals[1]) |
+ GEN9_IZ_HASHING(0, vals[0]));
+
+ return 0;
+}
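Concretely, for a hypothetical slice whose subslice_7eu mask is 0x4, exactly one subslice (index 2) has 7 EUs and the hashing value comes out as 3 - 2 = 1:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned char mask = 0x4;       /* only subslice 2 has 7 EUs */
        int ss = ffs(mask) - 1;         /* lowest set bit -> 2 */

        printf("ss=%d val=%d\n", ss, 3 - ss);   /* ss=2 val=1 */
        return 0;
}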
+
+
+static int skl_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_workarounds(ring);
+
+ /* WaDisablePowerCompilerClockGating:skl */
+ if (INTEL_REVID(dev) == SKL_REVID_B0)
+ WA_SET_BIT_MASKED(HIZ_CHICKEN,
+ BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+
+ return skl_tune_iz_hashing(ring);
+}
+
int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -888,6 +1042,11 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
if (IS_CHERRYVIEW(dev))
return chv_init_workarounds(ring);
+ if (IS_SKYLAKE(dev))
+ return skl_init_workarounds(ring);
+ else if (IS_GEN9(dev))
+ return gen9_init_workarounds(ring);
+
return 0;
}
@@ -1386,68 +1545,6 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-void intel_ring_setup_status_page(struct intel_engine_cs *ring)
-{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- u32 mmio = 0;
-
- /* The ring status page addresses are no longer next to the rest of
- * the ring registers as of gen7.
- */
- if (IS_GEN7(dev)) {
- switch (ring->id) {
- case RCS:
- mmio = RENDER_HWS_PGA_GEN7;
- break;
- case BCS:
- mmio = BLT_HWS_PGA_GEN7;
- break;
- /*
- * VCS2 actually doesn't exist on Gen7. Only shut up
- * gcc switch check warning
- */
- case VCS2:
- case VCS:
- mmio = BSD_HWS_PGA_GEN7;
- break;
- case VECS:
- mmio = VEBOX_HWS_PGA_GEN7;
- break;
- }
- } else if (IS_GEN6(ring->dev)) {
- mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
- } else {
- /* XXX: gen8 returns to sanity */
- mmio = RING_HWS_PGA(ring->mmio_base);
- }
-
- I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
- POSTING_READ(mmio);
-
- /*
- * Flush the TLB for this page
- *
- * FIXME: These two bits have disappeared on gen8, so a question
- * arises: do we still need this and if so how should we go about
- * invalidating the TLB?
- */
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
- u32 reg = RING_INSTPM(ring->mmio_base);
-
- /* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-
- I915_WRITE(reg,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
- 1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- ring->name);
- }
-}
-
static int
bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
@@ -1611,7 +1708,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 length,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -1622,7 +1719,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1635,8 +1733,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
- u64 offset, u32 len,
- unsigned flags)
+ u64 offset, u32 len,
+ unsigned dispatch_flags)
{
u32 cs_offset = ring->scratch.gtt_offset;
int ret;
@@ -1654,7 +1752,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- if ((flags & I915_DISPATCH_PINNED) == 0) {
+ if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1686,7 +1784,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1697,7 +1796,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -1706,7 +1805,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@@ -2097,6 +2197,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
kref_init(&request->ref);
request->ring = ring;
+ request->ringbuf = ring->buffer;
request->uniq = dev_private->request_uniq++;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
@@ -2273,9 +2374,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
- bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
+ bool ppgtt = USES_PPGTT(ring->dev) &&
+ !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_ring_begin(ring, 4);
@@ -2294,8 +2396,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
- u64 offset, u32 len,
- unsigned flags)
+ u64 offset, u32 len,
+ unsigned dispatch_flags)
{
int ret;
@@ -2305,7 +2407,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
- (flags & I915_DISPATCH_SECURE ?
+ (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
@@ -2317,7 +2419,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -2327,7 +2429,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -2341,7 +2444,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2350,7 +2452,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(ring->dev)->gen >= 8)
+ if (INTEL_INFO(dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2370,7 +2472,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(ring->dev)->gen >= 8) {
+ if (INTEL_INFO(dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
@@ -2379,13 +2481,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
- if (!invalidate && flush) {
- if (IS_GEN7(dev))
- return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
- else if (IS_BROADWELL(dev))
- dev_priv->fbc.need_sw_cache_clean = true;
- }
-
return 0;
}
@@ -2612,19 +2707,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
/**
- * Initialize the second BSD ring for Broadwell GT3.
- * It is noted that this only exists on Broadwell GT3.
+ * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
*/
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
- if ((INTEL_INFO(dev)->gen != 8)) {
- DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
- return -EINVAL;
- }
-
ring->name = "bsd2 ring";
ring->id = VCS2;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 714f3fdd57d2..c761fe05ad6f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -164,7 +164,7 @@ struct intel_engine_cs {
u32 seqno);
int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
u64 offset, u32 length,
- unsigned flags);
+ unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
@@ -242,7 +242,7 @@ struct intel_engine_cs {
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
- u64 offset, unsigned flags);
+ u64 offset, unsigned dispatch_flags);
/**
* List of objects currently involved in rendering from the
@@ -267,7 +267,6 @@ struct intel_engine_cs {
*/
struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
- bool fbc_dirty;
wait_queue_head_t irq_queue;
@@ -373,11 +372,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
+ * 0x20-0x2f: Reserved (Gen6+)
*
- * The area from dword 0x20 to 0x3ff is available for driver usage.
+ * The area from dword 0x30 to 0x3ff is available for driver usage.
*/
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
@@ -425,7 +425,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
-void intel_ring_setup_status_page(struct intel_engine_cs *ring);
int init_workarounds_ring(struct intel_engine_cs *ring);
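
The status-page hunk above reserves dwords 0x20-0x2f on Gen6+, pushing the driver index to 0x30 and the scratch index to 0x40; the byte address is just the dword index shifted into place. A quick check of that arithmetic, assuming the usual i915 shift of 2:

    #include <assert.h>

    #define MI_STORE_DWORD_INDEX_SHIFT 2    /* dword index -> byte offset */
    #define I915_GEM_HWS_INDEX         0x30
    #define I915_GEM_HWS_SCRATCH_INDEX 0x40

    int main(void)
    {
        /* Scratch dword 0x40 sits at byte 0x100 of the status page ... */
        assert((I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) == 0x100);
        /* ... and the driver index 0x30 at byte 0xc0, clear of 0x20-0x2f. */
        assert((I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT) == 0xc0);
        return 0;
    }
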
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 49695d7d51e3..ce00e6994eeb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -194,8 +194,39 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
- gen8_irq_power_well_post_enable(dev_priv);
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
+static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ if (power_well->data == SKL_DISP_PW_2) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+ }
+
+ if (power_well->data == SKL_DISP_PW_1) {
+ intel_prepare_ddi(dev);
+ gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
+ }
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -230,6 +261,141 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
}
}
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
+#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
+ SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static void skl_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ uint32_t tmp, fuse_status;
+ uint32_t req_mask, state_mask;
+ bool is_enabled, enable_requested, check_fuse_status = false;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ fuse_status = I915_READ(SKL_FUSE_STATUS);
+
+ switch (power_well->data) {
+ case SKL_DISP_PW_1:
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG0_DIST_STATUS), 1)) {
+ DRM_ERROR("PG0 not enabled\n");
+ return;
+ }
+ break;
+ case SKL_DISP_PW_2:
+ if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
+ DRM_ERROR("PG1 in disabled state\n");
+ return;
+ }
+ break;
+ case SKL_DISP_PW_DDI_A_E:
+ case SKL_DISP_PW_DDI_B:
+ case SKL_DISP_PW_DDI_C:
+ case SKL_DISP_PW_DDI_D:
+ case SKL_DISP_PW_MISC_IO:
+ break;
+ default:
+ WARN(1, "Unknown power well %lu\n", power_well->data);
+ return;
+ }
+
+ req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ enable_requested = tmp & req_mask;
+ state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ is_enabled = tmp & state_mask;
+
+ if (enable) {
+ if (!enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
+ }
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ state_mask), 1))
+ DRM_ERROR("%s enable timeout\n",
+ power_well->name);
+ check_fuse_status = true;
+ }
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+ DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+ }
+ }
+
+ if (check_fuse_status) {
+ if (power_well->data == SKL_DISP_PW_1) {
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG1_DIST_STATUS), 1))
+ DRM_ERROR("PG1 distributing status timeout\n");
+ } else if (power_well->data == SKL_DISP_PW_2) {
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG2_DIST_STATUS), 1))
+ DRM_ERROR("PG2 distributing status timeout\n");
+ }
+ }
+
+ if (enable && !is_enabled)
+ skl_power_well_post_enable(dev_priv, power_well);
+}
+
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
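
The SKL_DISPLAY_*_POWER_DOMAINS macros above compose each well's coverage as a bitmask, and the always-on set falls out as the complement of every switchable well's domains, with INIT kept in all of them. A reduced model with made-up domain ids, not the driver's enum:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy domain ids; the real enum lives in i915_drv.h. */
    enum { DOM_PIPE_A, DOM_PIPE_B, DOM_VGA, DOM_AUDIO, DOM_INIT, DOM_COUNT };

    #define BIT_(d)     (1u << (d))
    #define DOMAIN_MASK (BIT_(DOM_COUNT) - 1)

    #define WELL_2_DOMAINS (BIT_(DOM_PIPE_B) | BIT_(DOM_VGA) | \
                            BIT_(DOM_AUDIO) | BIT_(DOM_INIT))
    #define WELL_1_DOMAINS (WELL_2_DOMAINS | BIT_(DOM_PIPE_A) | BIT_(DOM_INIT))

    /* Always-on = every domain no switchable well owns, plus INIT. */
    #define ALWAYS_ON_DOMAINS \
        ((DOMAIN_MASK & ~(WELL_1_DOMAINS | WELL_2_DOMAINS)) | BIT_(DOM_INIT))

    int main(void)
    {
        printf("well1 0x%x well2 0x%x always-on 0x%x\n",
               WELL_1_DOMAINS, WELL_2_DOMAINS, ALWAYS_ON_DOMAINS);
        return 0;
    }
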
@@ -255,6 +421,36 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_set_power_well(dev_priv, power_well, false);
}
+static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
+ SKL_POWER_WELL_STATE(power_well->data);
+
+ return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
+}
+
+static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+ /* Clear any request made by BIOS as driver is taking over */
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void skl_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, true);
+}
+
+static void skl_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, false);
+}
+
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -829,6 +1025,13 @@ static const struct i915_power_well_ops hsw_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
+static const struct i915_power_well_ops skl_power_well_ops = {
+ .sync_hw = skl_power_well_sync_hw,
+ .enable = skl_power_well_enable,
+ .disable = skl_power_well_disable,
+ .is_enabled = skl_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
@@ -1059,6 +1262,57 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
return NULL;
}
+static struct i915_power_well skl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "power well 1",
+ .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_1,
+ },
+ {
+ .name = "MISC IO power well",
+ .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_MISC_IO,
+ },
+ {
+ .name = "power well 2",
+ .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_2,
+ },
+ {
+ .name = "DDI A/E power well",
+ .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_A_E,
+ },
+ {
+ .name = "DDI B power well",
+ .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C power well",
+ .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_C,
+ },
+ {
+ .name = "DDI D power well",
+ .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_D,
+ },
+};
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
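
The trailing context shows the registration idiom the new Skylake table feeds into: set_power_wells() records the array pointer and its element count together, so adding a platform is one call. The same idiom in isolation, using a GNU C statement expression as the kernel does:

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct power_well { const char *name; };
    struct power_domains {
        struct power_well *power_wells;
        int power_well_count;
    };

    /* Same shape as the driver's macro: pointer and count set together. */
    #define set_power_wells(pd, wells) ({           \
        (pd)->power_wells = (wells);                \
        (pd)->power_well_count = ARRAY_SIZE(wells); \
    })

    static struct power_well skl_wells[] = {
        { "always-on" }, { "power well 1" }, { "power well 2" },
    };

    int main(void)
    {
        struct power_domains pd;

        set_power_wells(&pd, skl_wells);
        printf("%d wells, first: %s\n", pd.power_well_count,
               pd.power_wells[0].name);
        return 0;
    }
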
@@ -1085,6 +1339,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_SKYLAKE(dev_priv->dev)) {
+ set_power_wells(power_domains, skl_power_wells);
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1200,7 +1456,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_get - grab an auxilliary power domain reference
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function grabs a power domain reference for the auxiliary power domain
@@ -1217,10 +1473,10 @@ void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_put - release an auxilliary power domain reference
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
* @dev_priv: i915 device instance
*
- * This function drops the auxilliary power domain reference obtained by
+ * This function drops the auxiliary power domain reference obtained by
* intel_aux_display_runtime_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
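
The kernel-doc above describes plain reference counting: the first get powers the domain up, and the last put may power it down. A toy model of that pairing, ignoring the driver's locking and hardware path:

    #include <assert.h>

    struct power_domain { int refcount; int powered; };

    static void domain_get(struct power_domain *d)
    {
        if (d->refcount++ == 0)
            d->powered = 1;     /* first user powers the block up */
    }

    static void domain_put(struct power_domain *d)
    {
        assert(d->refcount > 0);
        if (--d->refcount == 0)
            d->powered = 0;     /* last reference may power it down */
    }

    int main(void)
    {
        struct power_domain aux = { 0, 0 };

        domain_get(&aux);
        domain_get(&aux);
        domain_put(&aux);
        assert(aux.powered == 1);   /* still held once */
        domain_put(&aux);
        assert(aux.powered == 0);
        return 0;
    }
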
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 64ad2b40179f..e87d2f418de4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1247,7 +1247,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
switch (crtc->config->pixel_multiplier) {
default:
- WARN(1, "unknown pixel mutlipler specified\n");
+ WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -2194,6 +2194,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
@@ -2425,6 +2426,22 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
}
}
+static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+{
+ struct intel_sdvo_connector *sdvo_connector;
+
+ sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!sdvo_connector)
+ return NULL;
+
+ if (intel_connector_init(&sdvo_connector->base) < 0) {
+ kfree(sdvo_connector);
+ return NULL;
+ }
+
+ return sdvo_connector;
+}
+
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
@@ -2436,7 +2453,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising DVI device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
@@ -2490,7 +2507,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
DRM_DEBUG_KMS("initialising TV type %d\n", type);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
@@ -2569,7 +2586,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
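
intel_sdvo_connector_alloc() folds the repeated allocate-then-init sequence from the DVI/TV/LVDS paths into one helper with a single failure policy. The shape of that helper in plain C; the names and the fallible init step are stand-ins:

    #include <stdlib.h>

    struct connector_state { int dummy; };
    struct connector { struct connector_state *state; };

    /* Stand-in for intel_connector_init(): can fail on allocation. */
    static int connector_init(struct connector *c)
    {
        c->state = calloc(1, sizeof(*c->state));
        return c->state ? 0 : -1;
    }

    /* Allocate-and-init in one place so every caller shares the cleanup. */
    static struct connector *connector_alloc(void)
    {
        struct connector *c = calloc(1, sizeof(*c));

        if (!c)
            return NULL;

        if (connector_init(c) < 0) {
            free(c);
            return NULL;
        }

        return c;
    }

    int main(void)
    {
        struct connector *c = connector_alloc();

        if (c) {
            free(c->state);
            free(c);
        }
        return 0;
    }
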
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9c5451c97942..a4c0a04b5044 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -98,7 +98,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
if (min <= 0 || max <= 0)
return false;
- if (WARN_ON(drm_vblank_get(dev, pipe)))
+ if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
return false;
local_irq_disable();
@@ -132,7 +132,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
finish_wait(wq, &wait);
- drm_vblank_put(dev, pipe);
+ drm_crtc_vblank_put(&crtc->base);
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
@@ -179,7 +179,7 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -187,23 +187,16 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- u32 plane_ctl, stride;
+ u32 plane_ctl, stride_div;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ unsigned long surf_addr;
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
- plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
- plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
- plane_ctl &= ~PLANE_CTL_TILED_MASK;
- plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
- plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
-
- /* Trickle feed has to be enabled */
- plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
@@ -245,39 +238,57 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
BUG();
}
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X;
- stride = fb->pitches[0] >> 9;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ plane_ctl |= PLANE_CTL_TILED_Y;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ plane_ctl |= PLANE_CTL_TILED_YF;
break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
}
+
if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
- plane_ctl |= PLANE_CTL_ENABLE;
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
-
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
+ stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
+ if (key->flags) {
+ I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ surf_addr = intel_plane_obj_offset(intel_plane, obj);
+
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
- I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+ I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
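
The rewritten skl_update_plane() builds PLANE_CTL from zero on every update instead of masking bits out of the old value, and it keys tiling off fb->modifier[0] rather than the GEM object's tiling mode. A compact model of the modifier switch; the control-bit values are illustrative, not the hardware encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative control bits; the real ones are in i915_reg.h. */
    #define PLANE_CTL_ENABLE          (1u << 31)
    #define PLANE_CTL_PIPE_CSC_ENABLE (1u << 23)
    #define PLANE_CTL_TILED_X         (1u << 10)
    #define PLANE_CTL_TILED_Y         (4u << 10)
    #define PLANE_CTL_TILED_YF        (5u << 10)

    enum fb_modifier { MOD_NONE, MOD_X_TILED, MOD_Y_TILED, MOD_Yf_TILED };

    static uint32_t build_plane_ctl(enum fb_modifier mod)
    {
        /* Start from a known state instead of masking old bits out. */
        uint32_t ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_CSC_ENABLE;

        switch (mod) {
        case MOD_NONE:                                 break;
        case MOD_X_TILED:  ctl |= PLANE_CTL_TILED_X;   break;
        case MOD_Y_TILED:  ctl |= PLANE_CTL_TILED_Y;   break;
        case MOD_Yf_TILED: ctl |= PLANE_CTL_TILED_YF;  break;
        }
        return ctl;
    }

    int main(void)
    {
        printf("Y-tiled PLANE_CTL: 0x%08x\n", build_plane_ctl(MOD_Y_TILED));
        return 0;
    }
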
@@ -290,73 +301,15 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- I915_WRITE(PLANE_CTL(pipe, plane),
- I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+ I915_WRITE(PLANE_CTL(pipe, plane), 0);
/* Activate double buffered register update */
- I915_WRITE(PLANE_CTL(pipe, plane), 0);
- POSTING_READ(PLANE_CTL(pipe, plane));
+ I915_WRITE(PLANE_SURF(pipe, plane), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane));
intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
}
-static int
-skl_update_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
- I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
- I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
- plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
- I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
-
- POSTING_READ(PLANE_CTL(pipe, plane));
-
- return 0;
-}
-
-static void
-skl_get_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
- key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
- key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
- case PLANE_CTL_KEY_ENABLE_DESTINATION:
- key->flags = I915_SET_COLORKEY_DESTINATION;
- break;
- case PLANE_CTL_KEY_ENABLE_SOURCE:
- key->flags = I915_SET_COLORKEY_SOURCE;
- break;
- default:
- key->flags = I915_SET_COLORKEY_NONE;
- }
-}
-
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
@@ -399,7 +352,7 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -408,19 +361,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPCNTR(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SP_PIXFORMAT_MASK;
- sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SP_TILED;
- sprctl &= ~SP_ROTATE_180;
+ sprctl = SP_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_YUYV:
@@ -474,8 +423,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
- sprctl |= SP_ENABLE;
-
intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
@@ -503,6 +450,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
@@ -536,8 +492,8 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
- ~SP_ENABLE);
+ I915_WRITE(SPCNTR(pipe, plane), 0);
+
/* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
@@ -546,61 +502,11 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
-static int
-vlv_update_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
- I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
- I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- sprctl &= ~SP_SOURCE_KEY;
- if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SP_SOURCE_KEY;
- I915_WRITE(SPCNTR(pipe, plane), sprctl);
-
- POSTING_READ(SPKEYMSK(pipe, plane));
-
- return 0;
-}
-
-static void
-vlv_get_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
- key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
- key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- if (sprctl & SP_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -609,19 +515,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_plane->pipe;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPRCTL(pipe));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SPRITE_PIXFORMAT_MASK;
- sprctl &= ~SPRITE_RGB_ORDER_RGBX;
- sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SPRITE_TILED;
- sprctl &= ~SPRITE_ROTATE_180;
+ sprctl = SPRITE_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -660,8 +561,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl |= SPRITE_ENABLE;
-
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
@@ -698,6 +597,17 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPRKEYVAL(pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -739,73 +649,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
I915_WRITE(SPRSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
-}
-
-static int
-ivb_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
- sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- sprctl |= SPRITE_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SPRITE_SOURCE_KEY;
- I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
-
- POSTING_READ(SPRKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
-
- if (sprctl & SPRITE_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (sprctl & SPRITE_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -814,19 +663,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- dvscntr = I915_READ(DVSCNTR(pipe));
-
- /* Mask out pixel format bits in case we change it */
- dvscntr &= ~DVS_PIXFORMAT_MASK;
- dvscntr &= ~DVS_RGB_ORDER_XBGR;
- dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
- dvscntr &= ~DVS_TILED;
- dvscntr &= ~DVS_ROTATE_180;
+ dvscntr = DVS_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -862,7 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
- dvscntr |= DVS_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
@@ -894,6 +737,17 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(DVSKEYVAL(pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -922,20 +776,14 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
+
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
/**
@@ -993,7 +841,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
mutex_lock(&dev->struct_mutex);
- if (dev_priv->fbc.plane == intel_crtc->plane)
+ if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);
@@ -1006,67 +854,9 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
hsw_disable_ips(intel_crtc);
}
-static int
-ilk_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
- dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- dvscntr |= DVS_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- dvscntr |= DVS_SOURCE_KEY;
- I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
-
- POSTING_READ(DVSKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
-
- if (dvscntr & DVS_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (dvscntr & DVS_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
-
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
- struct drm_intel_sprite_colorkey key;
-
- intel_plane->get_colorkey(&intel_plane->base, &key);
-
- return key.flags != I915_SET_COLORKEY_NONE;
+ return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
}
static int
@@ -1076,7 +866,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1106,16 +895,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
return -EINVAL;
}
- /* Sprite planes can be linear or x-tiled surfaces */
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- case I915_TILING_X:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported tiling mode\n");
- return -EINVAL;
- }
-
/*
* FIXME the following code does a bunch of fuzzy adjustments to the
* coordinates and sizes. We probably need some way to decide whether
@@ -1259,6 +1038,19 @@ finish:
if (!intel_crtc->primary_enabled && !state->hides_primary)
intel_crtc->atomic.post_enable_primary = true;
+
+ if (intel_wm_need_update(plane, &state->base))
+ intel_crtc->atomic.update_wm = true;
+
+ if (!state->visible) {
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |=
+ (1 << drm_plane_index(plane));
+ }
}
return 0;
@@ -1272,7 +1064,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1280,8 +1071,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
- plane->fb = state->base.fb;
- intel_plane->obj = obj;
+ plane->fb = fb;
if (intel_crtc->active) {
intel_crtc->primary_enabled = !state->hides_primary;
@@ -1295,7 +1085,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
src_y = state->src.y1;
src_w = drm_rect_width(&state->src);
src_h = drm_rect_height(&state->src);
- intel_plane->update_plane(plane, crtc, fb, obj,
+ intel_plane->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
} else {
@@ -1312,13 +1102,14 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane;
int ret = 0;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
+ if (IS_VALLEYVIEW(dev) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, set->plane_id);
@@ -1328,34 +1119,15 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
}
intel_plane = to_intel_plane(plane);
- ret = intel_plane->update_colorkey(plane, set);
-
-out_unlock:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_intel_sprite_colorkey *get = data;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- drm_modeset_lock_all(dev);
-
- plane = drm_plane_find(dev, get->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ intel_plane->ckey = *set;
- intel_plane = to_intel_plane(plane);
- intel_plane->get_colorkey(plane, get);
+ /*
+ * The only way this could fail would be due to
+ * the current plane state being unsupportable already,
+ * and we don't consider that an error for the
+ * colorkey ioctl. So just ignore any error.
+ */
+ intel_plane_restore(plane);
out_unlock:
drm_modeset_unlock_all(dev);
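
After this hunk the colorkey ioctl no longer programs registers itself: it rejects source and destination keying together (and destination keying on VLV), caches the key in the plane, and relies on the next plane update to apply it. That flow, sketched with stand-in types:

    #include <errno.h>

    #define KEY_SOURCE      0x1u
    #define KEY_DESTINATION 0x2u

    struct colorkey { unsigned flags, min_value, max_value, channel_mask; };
    struct sprite   { struct colorkey ckey; };

    /* Stand-in for intel_plane_restore(): replays cached state to hw. */
    static void sprite_restore(struct sprite *s) { (void)s; }

    static int set_colorkey(struct sprite *s, const struct colorkey *set,
                            int dest_key_supported)
    {
        /* Source and destination keying are mutually exclusive. */
        if ((set->flags & (KEY_SOURCE | KEY_DESTINATION)) ==
            (KEY_SOURCE | KEY_DESTINATION))
            return -EINVAL;

        /* e.g. VLV sprites cannot do destination keying. */
        if (!dest_key_supported && (set->flags & KEY_DESTINATION))
            return -EINVAL;

        s->ckey = *set;     /* cached; programmed on the next update */
        sprite_restore(s);
        return 0;
    }

    int main(void)
    {
        struct sprite s = { { 0, 0, 0, 0 } };
        struct colorkey k = { KEY_SOURCE, 0, 255, 0x00ffffff };

        return set_colorkey(&s, &k, 0);
    }
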
@@ -1364,10 +1136,10 @@ out_unlock:
int intel_plane_restore(struct drm_plane *plane)
{
- if (!plane->crtc || !plane->fb)
+ if (!plane->crtc || !plane->state->fb)
return 0;
- return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
+ return plane->funcs->update_plane(plane, plane->crtc, plane->state->fb,
plane->state->crtc_x, plane->state->crtc_y,
plane->state->crtc_w, plane->state->crtc_h,
plane->state->src_x, plane->state->src_y,
@@ -1448,8 +1220,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
- intel_plane->update_colorkey = ilk_update_colorkey;
- intel_plane->get_colorkey = ilk_get_colorkey;
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
@@ -1473,16 +1243,12 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (IS_VALLEYVIEW(dev)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
- intel_plane->update_colorkey = vlv_update_colorkey;
- intel_plane->get_colorkey = vlv_get_colorkey;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->update_colorkey = ivb_update_colorkey;
- intel_plane->get_colorkey = ivb_get_colorkey;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1497,8 +1263,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
- intel_plane->update_colorkey = skl_update_colorkey;
- intel_plane->get_colorkey = skl_get_colorkey;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 892d23c8479d..8b9d325bda3c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1332,7 +1332,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
status = type < 0 ?
connector_status_disconnected :
connector_status_connected;
@@ -1516,6 +1516,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
@@ -1620,7 +1621,7 @@ intel_tv_init(struct drm_device *dev)
return;
}
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_tv);
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4e8fb891d4ea..ab5cc94588e1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -23,6 +23,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "i915_vgpu.h"
#include <linux/pm_runtime.h>
@@ -210,6 +211,13 @@ static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
gen6_gt_check_fifodbg(dev_priv);
}
+static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+{
+ u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+
+ return count & GT_FIFO_FREE_ENTRIES_MASK;
+}
+
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
@@ -217,16 +225,15 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
if (IS_VALLEYVIEW(dev_priv->dev))
- dev_priv->uncore.fifo_count =
- __raw_i915_read32(dev_priv, GTFIFOCTL) &
- GT_FIFO_FREE_ENTRIES_MASK;
+ dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
- u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ u32 fifo = fifo_free_entries(dev_priv);
+
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
- fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ fifo = fifo_free_entries(dev_priv);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
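
fifo_free_entries() centralizes the GTFIFOCTL read-and-mask, and the surrounding wait loop is a bounded poll with a 10us pause per retry. A userspace model of the same bounded-poll pattern, with the hardware read faked:

    #include <unistd.h>

    #define FIFO_RESERVED 20    /* like GT_FIFO_NUM_RESERVED_ENTRIES */

    /* Stand-in for reading GTFIFOCTL and masking GT_FIFO_FREE_ENTRIES_MASK. */
    static unsigned fifo_free_entries(void)
    {
        static unsigned fake;
        return fake++;          /* pretend entries slowly drain back */
    }

    static int wait_for_fifo(void)
    {
        int loop = 500;
        unsigned fifo = fifo_free_entries();

        /* Bounded poll: up to 500 retries with a 10us pause each. */
        while (fifo <= FIFO_RESERVED && loop--) {
            usleep(10);
            fifo = fifo_free_entries();
        }

        return (loop < 0 && fifo <= FIFO_RESERVED) ? -1 : 0;
    }

    int main(void)
    {
        return wait_for_fifo();
    }
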
@@ -314,8 +321,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count =
- __raw_i915_read32(dev_priv, GTFIFOCTL) &
- GT_FIFO_FREE_ENTRIES_MASK;
+ fifo_free_entries(dev_priv);
}
if (!restore)
@@ -328,8 +334,9 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
- (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+ if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+ INTEL_INFO(dev)->gen >= 9) &&
+ (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
@@ -550,18 +557,24 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug--; /* Only report the first N failures */
}
}
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
- if (i915.mmio_debug)
+ static bool mmio_debug_once = true;
+
+ if (i915.mmio_debug || !mmio_debug_once)
return;
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
+ DRM_DEBUG("Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug = mmio_debug_once--;
}
}
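
hsw_unclaimed_reg_detect() now arms mmio_debug exactly once: a static flag records whether the oneshot upgrade already fired, so the log is not spammed on every access. Distilled into standalone C, with the pending-access check stubbed out:

    #include <stdio.h>

    static int mmio_debug;      /* the i915.mmio_debug module parameter */

    static int unclaimed_access_pending(void) { return 1; }    /* stub */

    static void unclaimed_reg_detect(void)
    {
        static int once = 1;

        /* Skip if the user enabled debugging, or the oneshot already fired. */
        if (mmio_debug || !once)
            return;

        if (unclaimed_access_pending()) {
            fprintf(stderr, "unclaimed register detected, "
                            "enabling oneshot reporting\n");
            mmio_debug = once--;    /* arms one report, disarms the oneshot */
        }
    }

    int main(void)
    {
        unclaimed_reg_detect();     /* fires once */
        unclaimed_reg_detect();     /* silent: mmio_debug is now set */
        return 0;
    }
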
@@ -640,6 +653,14 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ GEN6_READ_HEADER(x); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ GEN6_READ_FOOTER; \
+}
+
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
@@ -703,6 +724,10 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
@@ -724,6 +749,7 @@ __gen6_read(64)
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
+#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
@@ -807,6 +833,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
GEN6_WRITE_FOOTER; \
}
+#define __vgpu_write(x) \
+static void vgpu_write##x(struct drm_i915_private *dev_priv, \
+ off_t reg, u##x val, bool trace) { \
+ GEN6_WRITE_HEADER; \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
+}
+
static const u32 gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
@@ -924,12 +958,17 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+__vgpu_write(64)
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
+#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
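
The vGPU accessors reuse the file's generation pattern: one macro per direction stamps out 8/16/32/64-bit variants that skip forcewake, since a guest's MMIO is trapped by the hypervisor anyway. The generation trick in miniature, against a fake MMIO window:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t mmio[256];   /* fake MMIO window */

    #define __vgpu_read(x)                                      \
    static uint##x##_t vgpu_read##x(unsigned off)               \
    {                                                           \
        uint##x##_t v;                                          \
        memcpy(&v, mmio + off, sizeof(v));                      \
        return v;                                               \
    }

    #define __vgpu_write(x)                                     \
    static void vgpu_write##x(unsigned off, uint##x##_t val)    \
    {                                                           \
        memcpy(mmio + off, &val, sizeof(val));                  \
    }

    __vgpu_read(8)  __vgpu_read(16)  __vgpu_read(32)  __vgpu_read(64)
    __vgpu_write(8) __vgpu_write(16) __vgpu_write(32) __vgpu_write(64)

    int main(void)
    {
        vgpu_write32(0x10, 0xdeadbeef);
        vgpu_write8(0x20, 0x42);
        printf("0x%08x 0x%02x\n", vgpu_read32(0x10), vgpu_read8(0x20));
        return 0;
    }
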
@@ -972,6 +1011,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
d->val_set = FORCEWAKE_KERNEL;
d->val_clear = 0;
} else {
+ /* WaRsClearFWBitsAtReset:bdw,skl */
d->val_reset = _MASKED_BIT_DISABLE(0xffff);
d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
@@ -1088,6 +1128,8 @@ void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_check_vgpu(dev);
+
intel_uncore_ellc_detect(dev);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
@@ -1136,6 +1178,11 @@ void intel_uncore_init(struct drm_device *dev)
break;
}
+ if (intel_vgpu_active(dev)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
+ ASSIGN_READ_MMIO_VFUNCS(vgpu);
+ }
+
i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 33cdddf26684..2b81a417cf29 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -36,6 +36,7 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
depends on DRM_IMX && MFD_SYSCON
+ select DRM_PANEL
help
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 87fe8ed92ebe..a3ecf1069b76 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -75,10 +75,10 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
},
};
-static const struct dw_hdmi_sym_term imx_sym_term[] = {
- /*pixelclk symbol term*/
- { 148500000, 0x800d, 0x0005 },
- { ~0UL, 0x0000, 0x0000 }
+static const struct dw_hdmi_phy_config imx_phy_config[] = {
+ /*pixelclk symbol term vlev */
+ { 148500000, 0x800d, 0x0005, 0x01ad},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
};
static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
@@ -123,7 +123,7 @@ static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
{
- imx_drm_panel_format(encoder, V4L2_PIX_FMT_RGB24);
+ imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
}
static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
@@ -163,7 +163,7 @@ static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
.mpll_cfg = imx_mpll_cfg,
.cur_ctr = imx_cur_ctr,
- .sym_term = imx_sym_term,
+ .phy_config = imx_phy_config,
.dev_type = IMX6Q_HDMI,
.mode_valid = imx6q_hdmi_mode_valid,
};
@@ -171,7 +171,7 @@ static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
.mpll_cfg = imx_mpll_cfg,
.cur_ctr = imx_cur_ctr,
- .sym_term = imx_sym_term,
+ .phy_config = imx_phy_config,
.dev_type = IMX6DL_HDMI,
.mode_valid = imx6dl_hdmi_mode_valid,
};
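
The new dw_hdmi phy_config table ends with a ~0UL pixel-clock sentinel, so a lookup can walk it without a separate length. A sketch of such a scan; the row values are copied from the table above, but the lookup helper itself is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    struct phy_config {
        unsigned long mpixelclock;
        uint16_t sym_ctr, term, vlev_ctr;
    };

    static const struct phy_config imx_phy_config[] = {
        /* pixelclk     symbol  term    vlev */
        { 148500000,    0x800d, 0x0005, 0x01ad },
        { ~0UL,         0x0000, 0x0000, 0x0000 },   /* sentinel */
    };

    /* Hypothetical helper: first entry whose clock covers the mode. */
    static const struct phy_config *phy_lookup(unsigned long pixclk)
    {
        const struct phy_config *p = imx_phy_config;

        while (p->mpixelclock != ~0UL && pixclk > p->mpixelclock)
            p++;
        return p;   /* sentinel row doubles as a safe default */
    }

    int main(void)
    {
        printf("term for 74.25MHz: 0x%04x\n", phy_lookup(74250000)->term);
        return 0;
    }
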
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 84cf99f8d957..74f505b0dd02 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -103,8 +103,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
return NULL;
}
-int imx_drm_panel_format_pins(struct drm_encoder *encoder,
- u32 interface_pix_fmt, int hsync_pin, int vsync_pin)
+int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
+ int hsync_pin, int vsync_pin)
{
struct imx_drm_crtc_helper_funcs *helper;
struct imx_drm_crtc *imx_crtc;
@@ -116,16 +116,16 @@ int imx_drm_panel_format_pins(struct drm_encoder *encoder,
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
return helper->set_interface_pix_fmt(encoder->crtc,
- interface_pix_fmt, hsync_pin, vsync_pin);
+ bus_format, hsync_pin, vsync_pin);
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_panel_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins);
-int imx_drm_panel_format(struct drm_encoder *encoder, u32 interface_pix_fmt)
+int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
{
- return imx_drm_panel_format_pins(encoder, interface_pix_fmt, 2, 3);
+ return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3);
}
-EXPORT_SYMBOL_GPL(imx_drm_panel_format);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
@@ -439,7 +439,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder)
{
struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc);
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct of_endpoint endpoint;
struct device_node *port;
int ret;
@@ -447,18 +447,15 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
if (!node || !imx_crtc)
return -EINVAL;
- do {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
port = of_graph_get_remote_port(ep);
of_node_put(port);
if (port == imx_crtc->crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
+ of_node_put(ep);
return ret ? ret : endpoint.port;
}
- } while (ep);
+ }
return -EINVAL;
}
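
The imx-drm-core hunk converts the open-coded endpoint walk to for_each_endpoint_of_node() and, crucially, adds of_node_put(ep) before the early return, since the iterator hands each node back with a reference held. The same discipline modeled on a toy refcounted node:

    #include <stdio.h>

    struct node { int refs; int id; };

    static struct node nodes[3] = { { 1, 0 }, { 1, 1 }, { 1, 2 } };

    static struct node *node_get(struct node *n) { n->refs++; return n; }
    static void node_put(struct node *n) { n->refs--; }

    /* The iterator hands back each node with one reference held, like
     * for_each_endpoint_of_node(); breaking out early must put it. */
    static int find_id(int want)
    {
        for (int i = 0; i < 3; i++) {
            struct node *ep = node_get(&nodes[i]);

            if (ep->id == want) {
                int id = ep->id;

                node_put(ep);   /* the fix: release before returning */
                return id;
            }
            node_put(ep);       /* the iterator does this on advance */
        }
        return -1;
    }

    int main(void)
    {
        find_id(1);
        for (int i = 0; i < 3; i++)
            printf("node %d refs %d\n", i, nodes[i].refs);  /* all stay 1 */
        return 0;
    }
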
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 3c559ccd6af0..28e776d8d9d2 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -18,7 +18,7 @@ struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
- u32 pix_fmt, int hsync_pin, int vsync_pin);
+ u32 bus_format, int hsync_pin, int vsync_pin);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
};
@@ -40,10 +40,10 @@ void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-int imx_drm_panel_format_pins(struct drm_encoder *encoder,
- u32 interface_pix_fmt, int hsync_pin, int vsync_pin);
-int imx_drm_panel_format(struct drm_encoder *encoder,
- u32 interface_pix_fmt);
+int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
+ u32 bus_format, int hsync_pin, int vsync_pin);
+int imx_drm_set_bus_format(struct drm_encoder *encoder,
+ u32 bus_format);
int imx_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2d6dc94e1e64..abacc8f67469 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -19,10 +19,11 @@
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <video/of_videomode.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
@@ -55,12 +56,14 @@ struct imx_ldb_channel {
struct imx_ldb *ldb;
struct drm_connector connector;
struct drm_encoder encoder;
+ struct drm_panel *panel;
struct device_node *child;
int chno;
void *edid;
int edid_len;
struct drm_display_mode mode;
int mode_valid;
+ int bus_format;
};
struct bus_mux {
@@ -75,6 +78,7 @@ struct imx_ldb {
struct imx_ldb_channel channel[2];
struct clk *clk[2]; /* our own clock */
struct clk *clk_sel[4]; /* parent of display clock */
+ struct clk *clk_parent[4]; /* original parent of clk_sel */
struct clk *clk_pll[2]; /* upstream clock we can adjust */
u32 ldb_ctrl;
const struct bus_mux *lvds_mux;
@@ -91,6 +95,17 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
int num_modes = 0;
+ if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
+ imx_ldb_ch->panel->funcs->get_modes) {
+ struct drm_display_info *di = &connector->display_info;
+
+ num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
+ if (!imx_ldb_ch->bus_format && di->num_bus_formats)
+ imx_ldb_ch->bus_format = di->bus_formats[0];
+ if (num_modes > 0)
+ return num_modes;
+ }
+
if (imx_ldb_ch->edid) {
drm_mode_connector_update_edid_property(connector,
imx_ldb_ch->edid);
@@ -163,24 +178,36 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
- u32 pixel_fmt;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ u32 bus_format;
- switch (imx_ldb_ch->chno) {
- case 0:
- pixel_fmt = (ldb->ldb_ctrl & LDB_DATA_WIDTH_CH0_24) ?
- V4L2_PIX_FMT_RGB24 : V4L2_PIX_FMT_BGR666;
+ switch (imx_ldb_ch->bus_format) {
+ default:
+ dev_warn(ldb->dev,
+ "could not determine data mapping, default to 18-bit \"spwg\"\n");
+ /* fallthrough */
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ bus_format = MEDIA_BUS_FMT_RGB666_1X18;
break;
- case 1:
- pixel_fmt = (ldb->ldb_ctrl & LDB_DATA_WIDTH_CH1_24) ?
- V4L2_PIX_FMT_RGB24 : V4L2_PIX_FMT_BGR666;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
break;
- default:
- dev_err(ldb->dev, "unable to config di%d panel format\n",
- imx_ldb_ch->chno);
- pixel_fmt = V4L2_PIX_FMT_RGB24;
}
- imx_drm_panel_format(encoder, pixel_fmt);
+ imx_drm_set_bus_format(encoder, bus_format);
}
static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
@@ -190,6 +217,8 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
+ drm_panel_prepare(imx_ldb_ch->panel);
+
if (dual) {
clk_prepare_enable(ldb->clk[0]);
clk_prepare_enable(ldb->clk[1]);
@@ -223,6 +252,8 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
}
regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+
+ drm_panel_enable(imx_ldb_ch->panel);
}
static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
@@ -274,6 +305,7 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int mux, ret;
/*
* imx_ldb_encoder_disable is called by
@@ -287,6 +319,8 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
(ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
return;
+ drm_panel_disable(imx_ldb_ch->panel);
+
if (imx_ldb_ch == &ldb->channel[0])
ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
else if (imx_ldb_ch == &ldb->channel[1])
@@ -298,6 +332,30 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
clk_disable_unprepare(ldb->clk[0]);
clk_disable_unprepare(ldb->clk[1]);
}
+
+ if (ldb->lvds_mux) {
+ const struct bus_mux *lvds_mux = NULL;
+
+ if (imx_ldb_ch == &ldb->channel[0])
+ lvds_mux = &ldb->lvds_mux[0];
+ else if (imx_ldb_ch == &ldb->channel[1])
+ lvds_mux = &ldb->lvds_mux[1];
+
+ regmap_read(ldb->regmap, lvds_mux->reg, &mux);
+ mux &= lvds_mux->mask;
+ mux >>= lvds_mux->shift;
+ } else {
+ mux = (imx_ldb_ch == &ldb->channel[0]) ? 0 : 1;
+ }
+
+ /* set display clock mux back to original input clock */
+ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk_parent[mux]);
+ if (ret)
+ dev_err(ldb->dev,
+ "unable to set di%d parent clock to original parent\n",
+ mux);
+
+ drm_panel_unprepare(imx_ldb_ch->panel);
}
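
The drm_panel calls threaded through the hooks above bracket the LVDS link symmetrically around the clock and IOMUXC_GPR2 programming. A minimal sketch of the intended ordering (illustrative only, not literal driver code):

	drm_panel_prepare(panel);	/* panel power up, before the clocks run */
	/* enable LDB clocks, write ldb_ctrl to IOMUXC_GPR2 */
	drm_panel_enable(panel);	/* backlight on, once the link is live */
	/* ... scanout ... */
	drm_panel_disable(panel);	/* backlight off, link still running */
	/* stop channels, gate clocks, restore clk_sel to clk_parent */
	drm_panel_unprepare(panel);	/* panel power down, last */
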
static struct drm_connector_funcs imx_ldb_connector_funcs = {
@@ -371,6 +429,9 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (imx_ldb_ch->panel)
+ drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
+
drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
&imx_ldb_ch->encoder);
@@ -382,25 +443,39 @@ enum {
LVDS_BIT_MAP_JEIDA
};
-static const char * const imx_ldb_bit_mappings[] = {
- [LVDS_BIT_MAP_SPWG] = "spwg",
- [LVDS_BIT_MAP_JEIDA] = "jeida",
+struct imx_ldb_bit_mapping {
+ u32 bus_format;
+ u32 datawidth;
+ const char * const mapping;
+};
+
+static const struct imx_ldb_bit_mapping imx_ldb_bit_mappings[] = {
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, "spwg" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, "spwg" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, "jeida" },
};
-static const int of_get_data_mapping(struct device_node *np)
+static u32 of_get_bus_format(struct device *dev, struct device_node *np)
{
const char *bm;
+ u32 datawidth = 0;
int ret, i;
ret = of_property_read_string(np, "fsl,data-mapping", &bm);
if (ret < 0)
return ret;
- for (i = 0; i < ARRAY_SIZE(imx_ldb_bit_mappings); i++)
- if (!strcasecmp(bm, imx_ldb_bit_mappings[i]))
- return i;
+ of_property_read_u32(np, "fsl,data-width", &datawidth);
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(imx_ldb_bit_mappings); i++) {
+ if (!strcasecmp(bm, imx_ldb_bit_mappings[i].mapping) &&
+ datawidth == imx_ldb_bit_mappings[i].datawidth)
+ return imx_ldb_bit_mappings[i].bus_format;
+ }
+
+ dev_err(dev, "invalid data mapping: %d-bit \"%s\"\n", datawidth, bm);
+
+ return -ENOENT;
}
static struct bus_mux imx6q_lvds_mux[2] = {
@@ -437,8 +512,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
struct device_node *child;
const u8 *edidp;
struct imx_ldb *imx_ldb;
- int datawidth;
- int mapping;
int dual;
int ret;
int i;
@@ -479,12 +552,15 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
imx_ldb->clk_sel[i] = NULL;
break;
}
+
+ imx_ldb->clk_parent[i] = clk_get_parent(imx_ldb->clk_sel[i]);
}
if (i == 0)
return ret;
for_each_child_of_node(np, child) {
struct imx_ldb_channel *channel;
+ struct device_node *port;
ret = of_property_read_u32(child, "reg", &i);
if (ret || i < 0 || i > 1)
@@ -503,49 +579,53 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
channel->chno = i;
channel->child = child;
+ /*
+ * The output port is port@4 with an external 4-port mux or
+ * port@2 with the internal 2-port mux.
+ */
+ port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2);
+ if (port) {
+ struct device_node *endpoint, *remote;
+
+ endpoint = of_get_child_by_name(port, "endpoint");
+ if (endpoint) {
+ remote = of_graph_get_remote_port_parent(endpoint);
+ if (remote)
+ channel->panel = of_drm_find_panel(remote);
+ else
+ return -EPROBE_DEFER;
+ if (!channel->panel) {
+ dev_err(dev, "panel not found: %s\n",
+ remote->full_name);
+ return -EPROBE_DEFER;
+ }
+ }
+ }
+
edidp = of_get_property(child, "edid", &channel->edid_len);
if (edidp) {
channel->edid = kmemdup(edidp, channel->edid_len,
GFP_KERNEL);
- } else {
+ } else if (!channel->panel) {
ret = of_get_drm_display_mode(child, &channel->mode, 0);
if (!ret)
channel->mode_valid = 1;
}
- ret = of_property_read_u32(child, "fsl,data-width", &datawidth);
- if (ret)
- datawidth = 0;
- else if (datawidth != 18 && datawidth != 24)
- return -EINVAL;
-
- mapping = of_get_data_mapping(child);
- switch (mapping) {
- case LVDS_BIT_MAP_SPWG:
- if (datawidth == 24) {
- if (i == 0 || dual)
- imx_ldb->ldb_ctrl |=
- LDB_DATA_WIDTH_CH0_24;
- if (i == 1 || dual)
- imx_ldb->ldb_ctrl |=
- LDB_DATA_WIDTH_CH1_24;
- }
- break;
- case LVDS_BIT_MAP_JEIDA:
- if (datawidth == 18) {
- dev_err(dev, "JEIDA standard only supported in 24 bit\n");
- return -EINVAL;
- }
- if (i == 0 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
- LDB_BIT_MAP_CH0_JEIDA;
- if (i == 1 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
- LDB_BIT_MAP_CH1_JEIDA;
- break;
- default:
- dev_err(dev, "data mapping not specified or invalid\n");
- return -EINVAL;
+ channel->bus_format = of_get_bus_format(dev, child);
+ if (channel->bus_format == -EINVAL) {
+ /*
+ * If no bus format was specified in the device tree,
+ * we can still get it from the connected panel later.
+ */
+ if (channel->panel && channel->panel->funcs &&
+ channel->panel->funcs->get_modes)
+ channel->bus_format = 0;
+ }
+ if (channel->bus_format < 0) {
+ dev_err(dev, "could not determine data mapping: %d\n",
+ channel->bus_format);
+ return channel->bus_format;
}
ret = imx_ldb_register(drm, channel);
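
A note on the error plumbing above: of_property_read_string() returns -EINVAL when "fsl,data-mapping" is absent, and of_get_bus_format() passes that through unchanged, while a present but unrecognized mapping/width pair yields -ENOENT (only "spwg"/18, "spwg"/24 and "jeida"/24 are valid, selecting e.g. MEDIA_BUS_FMT_RGB888_1X7X4_SPWG for "spwg"/24). A hedged sketch of the resulting decision tree, condensing the panel checks into a hypothetical panel_reports_formats flag:

	int fmt = of_get_bus_format(dev, child);

	if (fmt == -EINVAL && panel_reports_formats)
		fmt = 0;	/* defer to panel->funcs->get_modes() */
	else if (fmt < 0)
		return fmt;	/* -ENOENT: mapping given but invalid */
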
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 4216e479a9be..214eceefc981 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -301,11 +301,11 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_panel_format_pins(encoder, IPU_PIX_FMT_GBR24,
- tve->hsync_pin, tve->vsync_pin);
+ imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+ tve->hsync_pin, tve->vsync_pin);
break;
case TVE_MODE_TVOUT:
- imx_drm_panel_format(encoder, V4L2_PIX_FMT_YUV444);
+ imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
break;
}
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 98551e356e12..7bc8301fafff 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -45,7 +45,7 @@ struct ipu_crtc {
struct drm_pending_vblank_event *page_flip_event;
struct drm_framebuffer *newfb;
int irq;
- u32 interface_pix_fmt;
+ u32 bus_format;
int di_hsync_pin;
int di_vsync_pin;
};
@@ -145,7 +145,6 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_di_signal_cfg sig_cfg = {};
unsigned long encoder_types = 0;
- u32 out_pixel_fmt;
int ret;
dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
@@ -161,21 +160,21 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
__func__, encoder_types);
/*
- * If we have DAC, TVDAC or LDB, then we need the IPU DI clock
- * to be the same as the LDB DI clock.
+ * If we have DAC or LDB, then we need the IPU DI clock to be
+ * the same as the LDB DI clock. For TVDAC, derive the IPU DI
+ * clock from the 27 MHz TVE_DI clock, but allow it to be divided.
*/
if (encoder_types & (BIT(DRM_MODE_ENCODER_DAC) |
- BIT(DRM_MODE_ENCODER_TVDAC) |
BIT(DRM_MODE_ENCODER_LVDS)))
sig_cfg.clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT;
+ else if (encoder_types & BIT(DRM_MODE_ENCODER_TVDAC))
+ sig_cfg.clkflags = IPU_DI_CLKMODE_EXT;
else
sig_cfg.clkflags = 0;
- out_pixel_fmt = ipu_crtc->interface_pix_fmt;
-
sig_cfg.enable_pol = 1;
sig_cfg.clk_pol = 0;
- sig_cfg.pixel_fmt = out_pixel_fmt;
+ sig_cfg.bus_format = ipu_crtc->bus_format;
sig_cfg.v_to_h_sync = 0;
sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
@@ -184,7 +183,7 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
mode->flags & DRM_MODE_FLAG_INTERLACE,
- out_pixel_fmt, mode->hdisplay);
+ ipu_crtc->bus_format, mode->hdisplay);
if (ret) {
dev_err(ipu_crtc->dev,
"initializing display controller failed with %d\n",
@@ -202,7 +201,8 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
- x, y, mode->hdisplay, mode->vdisplay);
+ x, y, mode->hdisplay, mode->vdisplay,
+ mode->flags & DRM_MODE_FLAG_INTERLACE);
}
static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
@@ -291,11 +291,11 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
}
static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
- u32 pixfmt, int hsync_pin, int vsync_pin)
+ u32 bus_format, int hsync_pin, int vsync_pin)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- ipu_crtc->interface_pix_fmt = pixfmt;
+ ipu_crtc->bus_format = bus_format;
ipu_crtc->di_hsync_pin = hsync_pin;
ipu_crtc->di_vsync_pin = vsync_pin;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 6987e16fe99b..878a643d72e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -99,7 +99,7 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ uint32_t src_w, uint32_t src_h, bool interlaced)
{
struct device *dev = ipu_plane->base.dev->dev;
int ret;
@@ -213,6 +213,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
if (ret < 0)
return ret;
+ if (interlaced)
+ ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
ipu_plane->w = src_w;
ipu_plane->h = src_h;
@@ -312,7 +314,8 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
- src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16);
+ src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
+ false);
if (ret < 0) {
ipu_plane_put_resources(ipu_plane);
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index af125fb40ef5..9b5eff18f5b8 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -42,7 +42,7 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
- uint32_t src_h);
+ uint32_t src_h, bool interlaced);
void ipu_plane_enable(struct ipu_plane *plane);
void ipu_plane_disable(struct ipu_plane *plane);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 900dda6a8e71..74a9ce40ddc4 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -33,7 +33,7 @@ struct imx_parallel_display {
struct device *dev;
void *edid;
int edid_len;
- u32 interface_pix_fmt;
+ u32 bus_format;
int mode_valid;
struct drm_display_mode mode;
struct drm_panel *panel;
@@ -118,7 +118,7 @@ static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_drm_panel_format(encoder, imxpd->interface_pix_fmt);
+ imx_drm_set_bus_format(encoder, imxpd->bus_format);
}
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -225,14 +225,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
if (!strcmp(fmt, "rgb24"))
- imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB24;
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
else if (!strcmp(fmt, "rgb565"))
- imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB565;
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
else if (!strcmp(fmt, "bgr666"))
- imxpd->interface_pix_fmt = V4L2_PIX_FMT_BGR666;
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
else if (!strcmp(fmt, "lvds666"))
- imxpd->interface_pix_fmt =
- v4l2_fourcc('L', 'V', 'D', '6');
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
panel_node = of_parse_phandle(np, "fsl,panel", 0);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 9872ba9abf1a..6e84df9369a6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1222,7 +1222,7 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
u8 tmp;
if (mdev->type == G200_WB)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index bacbbb70f679..0a6f6764a37c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -35,3 +35,14 @@ config DRM_MSM_REGISTER_LOGGING
Compile in support for logging register reads/writes in a format
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+
+config DRM_MSM_DSI
+ bool "Enable DSI support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ default y
+ help
+ Choose this option if you need MIPI DSI connector
+ support.
+
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 674a132fd76e..ab2086783fee 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,5 +50,10 @@ msm-y := \
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_host.o \
+ dsi/dsi_manager.o \
+ dsi/dsi_phy.o \
+ mdp/mdp5/mdp5_cmd_encoder.o
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
new file mode 100644
index 000000000000..28d1f95a90cc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
+{
+ if (!msm_dsi || !msm_dsi->panel)
+ return NULL;
+
+ return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+ msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
+ msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+}
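
The helper keys purely off the panel's operating mode, so callers never pick an encoder index themselves. A short usage sketch (caller context hypothetical):

	struct drm_encoder *enc = msm_dsi_get_encoder(msm_dsi);

	if (!enc)
		return;	/* no panel bound yet */
	/* video-mode panels land on encoders[MSM_DSI_VIDEO_ENCODER_ID],
	 * command-mode panels on encoders[MSM_DSI_CMD_ENCODER_ID] */
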
+
+static void dsi_destroy(struct msm_dsi *msm_dsi)
+{
+ if (!msm_dsi)
+ return;
+
+ msm_dsi_manager_unregister(msm_dsi);
+ if (msm_dsi->host) {
+ msm_dsi_host_destroy(msm_dsi->host);
+ msm_dsi->host = NULL;
+ }
+
+ platform_set_drvdata(msm_dsi->pdev, NULL);
+}
+
+static struct msm_dsi *dsi_init(struct platform_device *pdev)
+{
+ struct msm_dsi *msm_dsi = NULL;
+ int ret;
+
+ if (!pdev) {
+ dev_err(&pdev->dev, "no dsi device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
+ if (!msm_dsi) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ DBG("dsi probed=%p", msm_dsi);
+
+ msm_dsi->pdev = pdev;
+ platform_set_drvdata(pdev, msm_dsi);
+
+ /* Init dsi host */
+ ret = msm_dsi_host_init(msm_dsi);
+ if (ret)
+ goto fail;
+
+ /* Register to dsi manager */
+ ret = msm_dsi_manager_register(msm_dsi);
+ if (ret)
+ goto fail;
+
+ return msm_dsi;
+
+fail:
+ if (msm_dsi)
+ dsi_destroy(msm_dsi);
+
+ return ERR_PTR(ret);
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi;
+
+ DBG("");
+ msm_dsi = dsi_init(pdev);
+ if (IS_ERR(msm_dsi))
+ return PTR_ERR(msm_dsi);
+
+ priv->dsi[msm_dsi->id] = msm_dsi;
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+ int id = msm_dsi->id;
+
+ if (priv->dsi[id]) {
+ dsi_destroy(msm_dsi);
+ priv->dsi[id] = NULL;
+ }
+}
+
+static const struct component_ops dsi_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_del(&pdev->dev, &dsi_ops);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss-dsi-ctrl" },
+ {}
+};
+
+static struct platform_driver dsi_driver = {
+ .probe = dsi_dev_probe,
+ .remove = dsi_dev_remove,
+ .driver = {
+ .name = "msm_dsi",
+ .of_match_table = dt_match,
+ },
+};
+
+void __init msm_dsi_register(void)
+{
+ DBG("");
+ platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret, i;
+
+ if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
+ !encoders[MSM_DSI_CMD_ENCODER_ID]))
+ return -EINVAL;
+
+ msm_dsi->dev = dev;
+
+ ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+ if (ret) {
+ dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+ goto fail;
+ }
+
+ msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
+ if (IS_ERR(msm_dsi->bridge)) {
+ ret = PTR_ERR(msm_dsi->bridge);
+ dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ msm_dsi->bridge = NULL;
+ goto fail;
+ }
+
+ msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+ if (IS_ERR(msm_dsi->connector)) {
+ ret = PTR_ERR(msm_dsi->connector);
+ dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+ msm_dsi->connector = NULL;
+ goto fail;
+ }
+
+ for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+ encoders[i]->bridge = msm_dsi->bridge;
+ msm_dsi->encoders[i] = encoders[i];
+ }
+
+ priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
+ priv->connectors[priv->num_connectors++] = msm_dsi->connector;
+
+ return 0;
+fail:
+ if (msm_dsi) {
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
+ if (msm_dsi->connector) {
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ msm_dsi->connector = NULL;
+ }
+ }
+
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 000000000000..10f54d4e379a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_CONNECTOR_H__
+#define __DSI_CONNECTOR_H__
+
+#include <linux/platform_device.h>
+
+#include "drm_crtc.h"
+#include "drm_mipi_dsi.h"
+#include "drm_panel.h"
+
+#include "msm_drv.h"
+
+#define DSI_0 0
+#define DSI_1 1
+#define DSI_MAX 2
+
+#define DSI_CLOCK_MASTER DSI_0
+#define DSI_CLOCK_SLAVE DSI_1
+
+#define DSI_LEFT DSI_0
+#define DSI_RIGHT DSI_1
+
+/* With the current drm framework initialization sequence, the encoder of
+ * DSI_1 is taken as the master encoder
+ */
+#define DSI_ENCODER_MASTER DSI_1
+#define DSI_ENCODER_SLAVE DSI_0
+
+struct msm_dsi {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ struct mipi_dsi_host *host;
+ struct msm_dsi_phy *phy;
+ struct drm_panel *panel;
+ unsigned long panel_flags;
+ bool phy_enabled;
+
+ /* the encoders we are hooked to (outside of dsi block) */
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+
+ int id;
+};
+
+/* dsi manager */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+int msm_dsi_manager_phy_enable(int id,
+ const unsigned long bit_rate, const unsigned long esc_rate,
+ u32 *clk_pre, u32 *clk_post);
+void msm_dsi_manager_phy_disable(int id);
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+
+/* msm dsi */
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
+
+/* dsi host */
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
+ u32 iova, u32 len);
+int msm_dsi_host_enable(struct mipi_dsi_host *host);
+int msm_dsi_host_disable(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_off(struct mipi_dsi_host *host);
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ struct drm_display_mode *mode);
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+ unsigned long *panel_flags);
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
+void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_destroy(struct mipi_dsi_host *host);
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev);
+int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+
+/* dsi phy */
+struct msm_dsi_phy;
+enum msm_dsi_phy_type {
+ MSM_DSI_PHY_UNKNOWN,
+ MSM_DSI_PHY_28NM,
+ MSM_DSI_PHY_MAX
+};
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id);
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+ u32 *clk_pre, u32 *clk_post);
+#endif /* __DSI_CONNECTOR_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index abf1bba520bf..1dcfae265e98 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
-
-Copyright (C) 2013 by the following authors:
+- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml ( 18681 bytes, from 2015-03-04 23:08:31)
+- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-01-28 21:43:22)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -51,11 +42,11 @@ enum dsi_traffic_mode {
BURST_MODE = 2,
};
-enum dsi_dst_format {
- DST_FORMAT_RGB565 = 0,
- DST_FORMAT_RGB666 = 1,
- DST_FORMAT_RGB666_LOOSE = 2,
- DST_FORMAT_RGB888 = 3,
+enum dsi_vid_dst_format {
+ VID_DST_FORMAT_RGB565 = 0,
+ VID_DST_FORMAT_RGB666 = 1,
+ VID_DST_FORMAT_RGB666_LOOSE = 2,
+ VID_DST_FORMAT_RGB888 = 3,
};
enum dsi_rgb_swap {
@@ -69,20 +60,63 @@ enum dsi_rgb_swap {
enum dsi_cmd_trigger {
TRIGGER_NONE = 0,
+ TRIGGER_SEOF = 1,
TRIGGER_TE = 2,
TRIGGER_SW = 4,
TRIGGER_SW_SEOF = 5,
TRIGGER_SW_TE = 6,
};
+enum dsi_cmd_dst_format {
+ CMD_DST_FORMAT_RGB111 = 0,
+ CMD_DST_FORMAT_RGB332 = 3,
+ CMD_DST_FORMAT_RGB444 = 4,
+ CMD_DST_FORMAT_RGB565 = 6,
+ CMD_DST_FORMAT_RGB666 = 7,
+ CMD_DST_FORMAT_RGB888 = 8,
+};
+
+enum dsi_lane_swap {
+ LANE_SWAP_0123 = 0,
+ LANE_SWAP_3012 = 1,
+ LANE_SWAP_2301 = 2,
+ LANE_SWAP_1230 = 3,
+ LANE_SWAP_0321 = 4,
+ LANE_SWAP_1032 = 5,
+ LANE_SWAP_2103 = 6,
+ LANE_SWAP_3210 = 7,
+};
+
#define DSI_IRQ_CMD_DMA_DONE 0x00000001
#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
#define DSI_IRQ_CMD_MDP_DONE 0x00000100
#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
#define DSI_IRQ_VIDEO_DONE 0x00010000
#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
+#define DSI_IRQ_BTA_DONE 0x00100000
+#define DSI_IRQ_MASK_BTA_DONE 0x00200000
#define DSI_IRQ_ERROR 0x01000000
#define DSI_IRQ_MASK_ERROR 0x02000000
+#define REG_DSI_6G_HW_VERSION 0x00000000
+#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000
+#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
+}
+#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000
+#define DSI_6G_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
+}
+#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff
+#define DSI_6G_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
+}
+
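
This header is generated, so every multi-bit field follows the same pattern: a FOO__MASK/FOO__SHIFT pair plus an inline packer, with extraction as the mirror image. A minimal sketch (the packed value below happens to equal the 6G v1.3.1 minor version ID used later in dsi_host.c):

	u32 ver = DSI_6G_HW_VERSION_MAJOR(1) |
		  DSI_6G_HW_VERSION_MINOR(3) |
		  DSI_6G_HW_VERSION_STEP(1);		/* 0x10030001 */
	u32 major = (ver & DSI_6G_HW_VERSION_MAJOR__MASK) >>
		    DSI_6G_HW_VERSION_MAJOR__SHIFT;	/* == 1 */
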
#define REG_DSI_CTRL 0x00000000
#define DSI_CTRL_ENABLE 0x00000001
#define DSI_CTRL_VID_MODE_EN 0x00000002
@@ -96,11 +130,15 @@ enum dsi_cmd_trigger {
#define DSI_CTRL_CRC_CHECK 0x01000000
#define REG_DSI_STATUS0 0x00000004
+#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001
#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
+#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004
#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
#define DSI_STATUS0_DSI_BUSY 0x00000010
+#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000
#define REG_DSI_FIFO_STATUS 0x00000008
+#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080
#define REG_DSI_VID_CFG0 0x0000000c
#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
@@ -111,7 +149,7 @@ static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
}
#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
-static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
{
return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
}
@@ -129,21 +167,15 @@ static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
#define REG_DSI_VID_CFG1 0x0000001c
-#define DSI_VID_CFG1_R_SEL 0x00000010
-#define DSI_VID_CFG1_G_SEL 0x00000100
-#define DSI_VID_CFG1_B_SEL 0x00001000
-#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
-#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
+#define DSI_VID_CFG1_R_SEL 0x00000001
+#define DSI_VID_CFG1_G_SEL 0x00000010
+#define DSI_VID_CFG1_B_SEL 0x00000100
+#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12
static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
{
return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
}
-#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
-#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
-static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
-{
- return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
-}
#define REG_DSI_ACTIVE_H 0x00000020
#define DSI_ACTIVE_H_START__MASK 0x00000fff
@@ -201,32 +233,115 @@ static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
}
-#define REG_DSI_ACTIVE_VSYNC 0x00000034
-#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
-#define DSI_ACTIVE_VSYNC_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030
+#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
{
- return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
}
-#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
-#define DSI_ACTIVE_VSYNC_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
{
- return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034
+#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
}
#define REG_DSI_CMD_DMA_CTRL 0x00000038
+#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000
#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
#define REG_DSI_CMD_CFG0 0x0000003c
+#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f
+#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0
+static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
+{
+ return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_CMD_CFG0_R_SEL 0x00000010
+#define DSI_CMD_CFG0_G_SEL 0x00000100
+#define DSI_CMD_CFG0_B_SEL 0x00001000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20
+static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
+}
+#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000
+#define DSI_CMD_CFG0_RGB_SWAP__SHIFT 16
+static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
+}
#define REG_DSI_CMD_CFG1 0x00000040
+#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff
+#define DSI_CMD_CFG1_WR_MEM_START__SHIFT 0
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
+}
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
+}
+#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000
#define REG_DSI_DMA_BASE 0x00000044
#define REG_DSI_DMA_LEN 0x00000048
+#define REG_DSI_CMD_MDP_STREAM_CTRL 0x00000054
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM_TOTAL 0x00000058
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+}
+
#define REG_DSI_ACK_ERR_STATUS 0x00000064
static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
@@ -234,19 +349,25 @@ static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
#define REG_DSI_TRIG_CTRL 0x00000080
-#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007
#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
{
return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
}
-#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070
#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
{
return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
}
-#define DSI_TRIG_CTRL_STREAM 0x00000100
+#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300
+#define DSI_TRIG_CTRL_STREAM__SHIFT 8
+static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
+{
+ return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
+}
+#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000
#define DSI_TRIG_CTRL_TE 0x80000000
#define REG_DSI_TRIG_DMA 0x0000008c
@@ -274,6 +395,12 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0
+static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
+{
+ return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
+}
#define REG_DSI_ERR_INT_MASK0 0x00000108
@@ -282,8 +409,36 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define REG_DSI_RESET 0x00000114
#define REG_DSI_CLK_CTRL 0x00000118
+#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001
+#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002
+#define DSI_CLK_CTRL_PCLK_ON 0x00000004
+#define DSI_CLK_CTRL_DSICLK_ON 0x00000008
+#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010
+#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020
+#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200
+
+#define REG_DSI_CLK_STATUS 0x0000011c
+#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000
#define REG_DSI_PHY_RESET 0x00000128
+#define DSI_PHY_RESET_RESET 0x00000001
+
+#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
+#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
+#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
+static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
+{
+ return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
+}
+#define DSI_RDBK_DATA_CTRL_CLR 0x00000001
+
+#define REG_DSI_VERSION 0x000001f0
+#define DSI_VERSION_MAJOR__MASK 0xff000000
+#define DSI_VERSION_MAJOR__SHIFT 24
+static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
+}
#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
@@ -501,5 +656,184 @@ static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x000003
#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 000000000000..956b22492c9a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,1993 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <video/mipi_display.h>
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define MSM_DSI_VER_MAJOR_V2 0x02
+#define MSM_DSI_VER_MAJOR_6G 0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+
+#define DSI_6G_REG_SHIFT 4
+
+#define DSI_REGULATOR_MAX 8
+struct dsi_reg_entry {
+ char name[32];
+ int min_voltage;
+ int max_voltage;
+ int enable_load;
+ int disable_load;
+};
+
+struct dsi_reg_config {
+ int num;
+ struct dsi_reg_entry regs[DSI_REGULATOR_MAX];
+};
+
+struct dsi_config {
+ u32 major;
+ u32 minor;
+ u32 io_offset;
+ enum msm_dsi_phy_type phy_type;
+ struct dsi_reg_config reg_cfg;
+};
+
+static const struct dsi_config dsi_cfgs[] = {
+ {MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN},
+ { /* 8974 v1 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_0,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8974 v2 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8974 v3 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8084 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_2,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8916 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 2850000, 2850000, 100000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+};
+
+static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+{
+ u32 ver;
+ u32 ver_6g;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+ /* From DSI6G(v3) onwards, a 6G_HW_VERSION register at offset 0
+ * shifts all other registers down by 4 bytes.
+ */
+ ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
+ if (ver_6g == 0) {
+ ver = msm_readl(base + REG_DSI_VERSION);
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver <= MSM_DSI_VER_MAJOR_V2) {
+ /* old versions */
+ *major = ver;
+ *minor = 0;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver == MSM_DSI_VER_MAJOR_6G) {
+ /* 6G version */
+ *major = ver;
+ *minor = ver_6g;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+}
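
Worked through for a hypothetical DSI6G v1.3.1 part, with illustrative values:

	ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);	/* 0x10030001 */
	/* non-zero: a 6G part, so legacy registers sit 4 bytes up */
	ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
	/* FIELD(ver, DSI_VERSION_MAJOR) == MSM_DSI_VER_MAJOR_6G (0x03),
	 * so *major = 0x03 and *minor = 0x10030001 */
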
+
+#define DSI_ERR_STATE_ACK 0x0000
+#define DSI_ERR_STATE_TIMEOUT 0x0001
+#define DSI_ERR_STATE_DLN0_PHY 0x0002
+#define DSI_ERR_STATE_FIFO 0x0004
+#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
+#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
+#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
+
+#define DSI_CLK_CTRL_ENABLE_CLKS \
+ (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
+ DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
+ DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
+ DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
+
+struct msm_dsi_host {
+ struct mipi_dsi_host base;
+
+ struct platform_device *pdev;
+ struct drm_device *dev;
+
+ int id;
+
+ void __iomem *ctrl_base;
+ struct regulator_bulk_data supplies[DSI_REGULATOR_MAX];
+ struct clk *mdp_core_clk;
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *mmss_misc_ahb_clk;
+ struct clk *byte_clk;
+ struct clk *esc_clk;
+ struct clk *pixel_clk;
+ u32 byte_clk_rate;
+
+ struct gpio_desc *disp_en_gpio;
+ struct gpio_desc *te_gpio;
+
+ const struct dsi_config *cfg;
+
+ struct completion dma_comp;
+ struct completion video_comp;
+ struct mutex dev_mutex;
+ struct mutex cmd_mutex;
+ struct mutex clk_mutex;
+ spinlock_t intr_lock; /* Protect interrupt ctrl register */
+
+ u32 err_work_state;
+ struct work_struct err_work;
+ struct workqueue_struct *workqueue;
+
+ struct drm_gem_object *tx_gem_obj;
+ u8 *rx_buf;
+
+ struct drm_display_mode *mode;
+
+ /* Panel info */
+ struct device_node *panel_node;
+ unsigned int channel;
+ unsigned int lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+
+ u32 dma_cmd_ctrl_restore;
+
+ bool registered;
+ bool power_on;
+ int irq;
+};
+
+static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB565: return 16;
+ case MIPI_DSI_FMT_RGB666_PACKED: return 18;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default: return 24;
+ }
+}
+
+static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
+{
+ return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
+static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
+{
+ msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
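
Both accessors fold in cfg->io_offset, so the 4-byte DSI6G register shift detected by dsi_get_version() stays invisible to the rest of the driver. A hedged read-modify-write sketch:

	u32 ctrl = dsi_read(msm_host, REG_DSI_CTRL);	/* base + 4 + 0x0 on 6G */

	dsi_write(msm_host, REG_DSI_CTRL, ctrl | DSI_CTRL_ENABLE);
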
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
+
+static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+{
+ const struct dsi_config *cfg;
+ struct regulator *gdsc_reg;
+ int i, ret;
+ u32 major = 0, minor = 0;
+
+ gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
+ if (IS_ERR_OR_NULL(gdsc_reg)) {
+ pr_err("%s: cannot get gdsc\n", __func__);
+ goto fail;
+ }
+ ret = regulator_enable(gdsc_reg);
+ if (ret) {
+ pr_err("%s: unable to enable gdsc\n", __func__);
+ regulator_put(gdsc_reg);
+ goto fail;
+ }
+ ret = clk_prepare_enable(msm_host->ahb_clk);
+ if (ret) {
+ pr_err("%s: unable to enable ahb_clk\n", __func__);
+ regulator_disable(gdsc_reg);
+ regulator_put(gdsc_reg);
+ goto fail;
+ }
+
+ ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
+
+ clk_disable_unprepare(msm_host->ahb_clk);
+ regulator_disable(gdsc_reg);
+ regulator_put(gdsc_reg);
+ if (ret) {
+ pr_err("%s: Invalid version\n", __func__);
+ goto fail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
+ cfg = dsi_cfgs + i;
+ if ((cfg->major == major) && (cfg->minor == minor))
+ return cfg;
+ }
+ pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+
+fail:
+ return NULL;
+}
+
+static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct msm_dsi_host, base);
+}
+
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int i;
+
+ DBG("");
+ for (i = num - 1; i >= 0; i--)
+ if (regs[i].disable_load >= 0)
+ regulator_set_load(s[i].consumer,
+ regs[i].disable_load);
+
+ regulator_bulk_disable(num, s);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int ret, i;
+
+ DBG("");
+ for (i = 0; i < num; i++) {
+ if (regs[i].enable_load >= 0) {
+ ret = regulator_set_load(s[i].consumer,
+ regs[i].enable_load);
+ if (ret < 0) {
+ pr_err("regulator %d set op mode failed, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+ }
+
+ ret = regulator_bulk_enable(num, s);
+ if (ret < 0) {
+ pr_err("regulator enable failed, %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ regulator_set_load(s[i].consumer, regs[i].disable_load);
+ return ret;
+}
+
+static int dsi_regulator_init(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int i, ret;
+
+ for (i = 0; i < num; i++)
+ s[i].supply = regs[i].name;
+
+ ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
+ if (ret < 0) {
+ pr_err("%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ for (i = 0; i < num; i++) {
+ if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+ ret = regulator_set_voltage(s[i].consumer,
+ regs[i].min_voltage, regs[i].max_voltage);
+ if (ret < 0) {
+ pr_err("regulator %d set voltage failed, %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
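
The per-SoC tables above encode each supply as {name, min_uV, max_uV, enable_load, disable_load}, and a negative value means "leave that parameter alone", which is why the all -1 "gdsc" entry skips both regulator_set_voltage() and regulator_set_load(). A hypothetical extra entry, shown only to illustrate the shape:

	{ /* hypothetical supply, not a real DSI rail */
		.name = "vdd-aux",
		.min_voltage = 2800000,
		.max_voltage = 2800000,
		.enable_load = 100000,
		.disable_load = 100,
	},
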
+
+static int dsi_clk_init(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret = 0;
+
+ msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+ if (IS_ERR(msm_host->mdp_core_clk)) {
+ ret = PTR_ERR(msm_host->mdp_core_clk);
+ pr_err("%s: Unable to get mdp core clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(msm_host->ahb_clk)) {
+ ret = PTR_ERR(msm_host->ahb_clk);
+ pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
+ if (IS_ERR(msm_host->axi_clk)) {
+ ret = PTR_ERR(msm_host->axi_clk);
+ pr_err("%s: Unable to get axi bus clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
+ if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
+ ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
+ pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
+ if (IS_ERR(msm_host->byte_clk)) {
+ ret = PTR_ERR(msm_host->byte_clk);
+ pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->byte_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ if (IS_ERR(msm_host->pixel_clk)) {
+ ret = PTR_ERR(msm_host->pixel_clk);
+ pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->pixel_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->esc_clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(msm_host->esc_clk)) {
+ ret = PTR_ERR(msm_host->esc_clk);
+ pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->esc_clk = NULL;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ DBG("id=%d", msm_host->id);
+
+ ret = clk_prepare_enable(msm_host->mdp_core_clk);
+ if (ret) {
+ pr_err("%s: failed to enable mdp_core_clock, %d\n",
+ __func__, ret);
+ goto core_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->ahb_clk);
+ if (ret) {
+ pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+ goto ahb_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->axi_clk);
+ if (ret) {
+ pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+ goto axi_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
+ if (ret) {
+ pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
+ __func__, ret);
+ goto misc_ahb_clk_err;
+ }
+
+ return 0;
+
+misc_ahb_clk_err:
+ clk_disable_unprepare(msm_host->axi_clk);
+axi_clk_err:
+ clk_disable_unprepare(msm_host->ahb_clk);
+ahb_clk_err:
+ clk_disable_unprepare(msm_host->mdp_core_clk);
+core_clk_err:
+ return ret;
+}
+
+static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
+{
+ DBG("");
+ clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
+ clk_disable_unprepare(msm_host->axi_clk);
+ clk_disable_unprepare(msm_host->ahb_clk);
+ clk_disable_unprepare(msm_host->mdp_core_clk);
+}
+
+static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ DBG("Set clk rates: pclk=%d, byteclk=%d",
+ msm_host->mode->clock, msm_host->byte_clk_rate);
+
+ ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ if (ret) {
+ pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->esc_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->byte_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return 0;
+
+pixel_clk_err:
+ clk_disable_unprepare(msm_host->byte_clk);
+byte_clk_err:
+ clk_disable_unprepare(msm_host->esc_clk);
+error:
+ return ret;
+}
+
+static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
+
+static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
+{
+ int ret = 0;
+
+ mutex_lock(&msm_host->clk_mutex);
+ if (enable) {
+ ret = dsi_bus_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: Can not enable bus clk, %d\n",
+ __func__, ret);
+ goto unlock_ret;
+ }
+ ret = dsi_link_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: Can not enable link clk, %d\n",
+ __func__, ret);
+ dsi_bus_clk_disable(msm_host);
+ goto unlock_ret;
+ }
+ } else {
+ dsi_link_clk_disable(msm_host);
+ dsi_bus_clk_disable(msm_host);
+ }
+
+unlock_ret:
+ mutex_unlock(&msm_host->clk_mutex);
+ return ret;
+}
+
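+/* The byte clock carries one byte per lane per cycle, so for a given pixel
+ * clock the per-lane link rate is: byte_clk = (pclk * bpp) / (8 * lanes).
+ */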
+static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ u8 lanes = msm_host->lanes;
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u32 pclk_rate;
+
+ if (!mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ pclk_rate = mode->clock * 1000;
+ if (lanes > 0) {
+ msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
+ } else {
+ pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+ msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ }
+
+ DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+
+ return 0;
+}
+
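+/* Pulse the PHY reset bit, with barriers and delays to let the reset
+ * propagate before the PHY is programmed.
+ */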
+static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
+{
+ DBG("");
+ dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+ /* Make sure fully reset */
+ wmb();
+ udelay(1000);
+ dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+ udelay(100);
+}
+
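+/* Read-modify-write of the interrupt control register, protected by
+ * intr_lock since the register is also written from the irq handler.
+ */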
+static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
+{
+ u32 intr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+
+ if (enable)
+ intr |= mask;
+ else
+ intr &= ~mask;
+
+ DBG("intr=%x enable=%d", intr, enable);
+
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+}
+
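+/* Map DSI mode flags to the hardware traffic mode; burst takes precedence
+ * over sync-pulse, and sync-event is the default.
+ */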
+static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
+{
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ return BURST_MODE;
+ else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ return NON_BURST_SYNCH_PULSE;
+
+ return NON_BURST_SYNCH_EVENT;
+}
+
+static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
+ case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
+ default: return VID_DST_FORMAT_RGB888;
+ }
+}
+
+static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
+ default: return CMD_DST_FORMAT_RGB888;
+ }
+}
+
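+/* Program the mode-dependent controller registers: video/command stream
+ * configuration, trigger control, clockout timing, EOT packet policy, the
+ * error interrupt mask, and finally lane setup plus the master enable bit.
+ */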
+static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+ u32 clk_pre, u32 clk_post)
+{
+ u32 flags = msm_host->mode_flags;
+ enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+ u32 data = 0;
+
+ if (!enable) {
+ dsi_write(msm_host, REG_DSI_CTRL, 0);
+ return;
+ }
+
+ if (flags & MIPI_DSI_MODE_VIDEO) {
+ if (flags & MIPI_DSI_MODE_VIDEO_HSE)
+ data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
+ if (flags & MIPI_DSI_MODE_VIDEO_HFP)
+ data |= DSI_VID_CFG0_HFP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_HBP)
+ data |= DSI_VID_CFG0_HBP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_HSA)
+ data |= DSI_VID_CFG0_HSA_POWER_STOP;
+ /* Always set low power stop mode for BLLP
+ * to let command engine send packets
+ */
+ data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
+ DSI_VID_CFG0_BLLP_POWER_STOP;
+ data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
+ data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
+ data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
+ dsi_write(msm_host, REG_DSI_VID_CFG0, data);
+
+ /* Do not swap RGB colors */
+ data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
+ dsi_write(msm_host, REG_DSI_VID_CFG1, data);
+ } else {
+ /* Do not swap RGB colors */
+ data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
+ data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
+ dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
+
+ data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
+ DSI_CMD_CFG1_WR_MEM_CONTINUE(
+ MIPI_DCS_WRITE_MEMORY_CONTINUE);
+ /* Always insert DCS command */
+ data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
+ dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
+ }
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
+ DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
+ DSI_CMD_DMA_CTRL_LOW_POWER);
+
+ data = 0;
+ /* Always assume dedicated TE pin */
+ data |= DSI_TRIG_CTRL_TE;
+ data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
+ data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
+ data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
+ if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+ (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+ data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
+ dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
+
+ data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
+ DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
+ dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
+
+ data = 0;
+ if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
+ data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
+ dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
+
+ /* allow only ack-err-status to generate interrupt */
+ dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+
+ data = DSI_CTRL_CLK_EN;
+
+ DBG("lane number=%d", msm_host->lanes);
+ if (msm_host->lanes == 2) {
+ data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
+ /* swap lanes for 2-lane panel for better performance */
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
+ } else {
+ /* Take 4 lanes as default */
+ data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
+ DSI_CTRL_LANE3;
+ /* Do not swap lanes for 4-lane panel */
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
+ }
+ data |= DSI_CTRL_ENABLE;
+
+ dsi_write(msm_host, REG_DSI_CTRL, data);
+}
+
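+/* The controller counts horizontal/vertical positions from the start of
+ * sync, so active video begins at (total - sync_start), i.e. after the sync
+ * pulse plus the back porch. Command mode only needs the stream word count
+ * and the stream dimensions.
+ */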
+static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
+ u32 h_total = mode->htotal;
+ u32 v_total = mode->vtotal;
+ u32 hs_end = mode->hsync_end - mode->hsync_start;
+ u32 vs_end = mode->vsync_end - mode->vsync_start;
+ u32 ha_start = h_total - mode->hsync_start;
+ u32 ha_end = ha_start + mode->hdisplay;
+ u32 va_start = v_total - mode->vsync_start;
+ u32 va_end = va_start + mode->vdisplay;
+ u32 wc;
+
+ DBG("");
+
+ if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi_write(msm_host, REG_DSI_ACTIVE_H,
+ DSI_ACTIVE_H_START(ha_start) |
+ DSI_ACTIVE_H_END(ha_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_V,
+ DSI_ACTIVE_V_START(va_start) |
+ DSI_ACTIVE_V_END(va_end));
+ dsi_write(msm_host, REG_DSI_TOTAL,
+ DSI_TOTAL_H_TOTAL(h_total - 1) |
+ DSI_TOTAL_V_TOTAL(v_total - 1));
+
+ dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
+ DSI_ACTIVE_HSYNC_START(hs_start) |
+ DSI_ACTIVE_HSYNC_END(hs_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
+ DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
+ DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+ } else { /* command mode */
+ /* image data and 1 byte write_memory_start cmd */
+ wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
+ DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
+ DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
+ msm_host->channel) |
+ DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
+ MIPI_DSI_DCS_LONG_WRITE));
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
+ }
+}
+
+static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+{
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+ wmb(); /* clocks need to be enabled before reset */
+
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+ wmb(); /* make sure reset happen */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+}
+
+static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
+ bool video_mode, bool enable)
+{
+ u32 dsi_ctrl;
+
+ dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+ if (!enable) {
+ dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
+ DSI_CTRL_CMD_MODE_EN);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
+ DSI_IRQ_MASK_VIDEO_DONE, 0);
+ } else {
+ if (video_mode) {
+ dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
+ } else { /* command mode */
+ dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
+ }
+ dsi_ctrl |= DSI_CTRL_ENABLE;
+ }
+
+ dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
+}
+
+static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
+{
+ u32 data;
+
+ data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
+
+ if (mode == 0)
+ data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
+ else
+ data |= DSI_CMD_DMA_CTRL_LOW_POWER;
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
+}
+
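+/* Command DMA must not collide with active video: wait for the end of the
+ * current video frame, then delay into the BLLP region before triggering.
+ */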
+static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+{
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
+
+ reinit_completion(&msm_host->video_comp);
+
+ wait_for_completion_timeout(&msm_host->video_comp,
+ msecs_to_jiffies(70));
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
+}
+
+static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+{
+ if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+ return;
+
+ if (msm_host->power_on) {
+ dsi_wait4video_done(msm_host);
+ /* delay 2 to 4 ms to skip BLLP */
+ usleep_range(2000, 4000);
+ }
+}
+
+/* dsi_cmd */
+static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+ int ret;
+ u32 iova;
+
+ mutex_lock(&dev->struct_mutex);
+ msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
+ if (IS_ERR(msm_host->tx_gem_obj)) {
+ ret = PTR_ERR(msm_host->tx_gem_obj);
+ pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
+ msm_host->tx_gem_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+ if (ret) {
+ pr_err("%s: failed to get iova, %d\n", __func__, ret);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ if (iova & 0x07) {
+ pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ if (msm_host->tx_gem_obj) {
+ msm_gem_put_iova(msm_host->tx_gem_obj, 0);
+ mutex_lock(&dev->struct_mutex);
+ msm_gem_free_object(msm_host->tx_gem_obj);
+ msm_host->tx_gem_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mipi_dsi_packet packet;
+ int len;
+ int ret;
+ u8 *data;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ pr_err("%s: create packet failed, %d\n", __func__, ret);
+ return ret;
+ }
+ len = (packet.size + 3) & (~0x3);
+
+ if (len > tx_gem->size) {
+ pr_err("%s: packet size is too big\n", __func__);
+ return -EINVAL;
+ }
+
+ data = msm_gem_vaddr(tx_gem);
+
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* MSM specific command format in memory */
+ data[0] = packet.header[1];
+ data[1] = packet.header[2];
+ data[2] = packet.header[0];
+ data[3] = BIT(7); /* Last packet */
+ if (mipi_dsi_packet_format_is_long(msg->type))
+ data[3] |= BIT(6);
+ if (msg->rx_buf && msg->rx_len)
+ data[3] |= BIT(5);
+
+ /* Long packet */
+ if (packet.payload && packet.payload_length)
+ memcpy(data + 4, packet.payload, packet.payload_length);
+
+ /* Append 0xff to the end */
+ if (packet.size < len)
+ memset(data + packet.size, 0xff, len - packet.size);
+
+ return len;
+}
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 1)) {
+ *data = buf[1]; /* strip out dcs type */
+ return 1;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %d\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameter
+ */
+static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 2)) {
+ data[0] = buf[1]; /* strip out dcs type */
+ data[1] = buf[2];
+ return 2;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %d\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ /* strip out 4 byte dcs header */
+ if (msg->rx_buf && msg->rx_len)
+ memcpy(msg->rx_buf, buf + 4, msg->rx_len);
+
+ return msg->rx_len;
+}
+
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ int ret;
+ u32 iova;
+ bool triggered;
+
+ ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
+ }
+
+ reinit_completion(&msm_host->dma_comp);
+
+ dsi_wait4video_eng_busy(msm_host);
+
+ triggered = msm_dsi_manager_cmd_xfer_trigger(
+ msm_host->id, iova, len);
+ if (triggered) {
+ ret = wait_for_completion_timeout(&msm_host->dma_comp,
+ msecs_to_jiffies(200));
+ DBG("ret=%d", ret);
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else
+ ret = len;
+ } else
+ ret = len;
+
+ return ret;
+}
+
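+/* Drain the 16-byte read-back fifo: the four 32-bit RDBK_DATA registers are
+ * read in reverse index order and converted from network byte order; a
+ * trailing ack/error packet is discounted, and bytes already returned by an
+ * earlier pass of a long read are skipped.
+ */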
+static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
+ u8 *buf, int rx_byte, int pkt_size)
+{
+ u32 *lp, *temp, data;
+ int i, j = 0, cnt;
+ bool ack_error = false;
+ u32 read_cnt;
+ u8 reg[16];
+ int repeated_bytes = 0;
+ int buf_offset = buf - msm_host->rx_buf;
+
+ lp = (u32 *)buf;
+ temp = (u32 *)reg;
+ cnt = (rx_byte + 3) >> 2;
+ if (cnt > 4)
+ cnt = 4; /* 4 x 32 bits registers only */
+
+ /* Calculate real read data count */
+ read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
+
+ ack_error = (rx_byte == 4) ?
+ (read_cnt == 8) : /* short pkt + 4-byte error pkt */
+ (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
+
+ if (ack_error)
+ read_cnt -= 4; /* Remove 4 byte error pkt */
+
+ /*
+ * In case of multiple reads from the panel, after the first read there
+ * is a possibility that some bytes in the payload repeat in the
+ * RDBK_DATA registers. Since we read all the parameters from the
+ * panel right from the first byte for every pass, we need to skip the
+ * repeating bytes and then append the new parameters to the rx buffer.
+ */
+ if (read_cnt > 16) {
+ int bytes_shifted;
+ /* Any data more than 16 bytes will be shifted out.
+ * The temp read buffer should already contain these bytes.
+ * The remaining bytes in read buffer are the repeated bytes.
+ */
+ bytes_shifted = read_cnt - 16;
+ repeated_bytes = buf_offset - bytes_shifted;
+ }
+
+ for (i = cnt - 1; i >= 0; i--) {
+ data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+ *temp++ = ntohl(data); /* to host byte order */
+ DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+ }
+
+ for (i = repeated_bytes; i < 16; i++)
+ buf[j++] = reg[i];
+
+ return j;
+}
+
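+/* Pack the message into the tx gem buffer and DMA it out; returns the
+ * number of bytes queued on success or a negative error code.
+ */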
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+ const struct mipi_dsi_msg *msg)
+{
+ int len, ret;
+ int bllp_len = msm_host->mode->hdisplay *
+ dsi_get_bpp(msm_host->format) / 8;
+
+ len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
+ if (len <= 0) {
+ pr_err("%s: failed to add cmd type = 0x%x\n",
+ __func__, msg->type);
+ return -EINVAL;
+ }
+
+ /* for video mode, do not send cmds more than
+ * one pixel line, since the hardware only transmits
+ * them during BLLP.
+ */
+ /* TODO: if the command is sent in LP mode, the bit rate is only
+ * half of esc clk rate. In this case, if the video is already
+ * actively streaming, we need to check more carefully if the
+ * command can be fit into one BLLP.
+ */
+ if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
+ pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ ret = dsi_cmd_dma_tx(msm_host, len);
+ if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
+ return -ECOMM;
+ }
+
+ return len;
+}
+
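+/* Fully reset the controller while preserving the current DSI_CTRL value;
+ * used by the error worker to recover from an MDP fifo underflow.
+ */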
+static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
+{
+ u32 data0, data1;
+
+ data0 = dsi_read(msm_host, REG_DSI_CTRL);
+ data1 = data0;
+ data1 &= ~DSI_CTRL_ENABLE;
+ dsi_write(msm_host, REG_DSI_CTRL, data1);
+ /*
+ * dsi controller need to be disabled before
+ * clocks turned on
+ */
+ wmb();
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+ wmb(); /* make sure clocks enabled */
+
+ /* dsi controller can only be reset while clocks are running */
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+ wmb(); /* make sure reset happen */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb(); /* controller out of reset */
+ dsi_write(msm_host, REG_DSI_CTRL, data0);
+ wmb(); /* make sure dsi controller enabled again */
+}
+
+static void dsi_err_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, err_work);
+ u32 status = msm_host->err_work_state;
+
+ pr_err("%s: status=%x\n", __func__, status);
+ if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
+ dsi_sw_reset_restore(msm_host);
+
+ /* It is safe to clear here because error irq is disabled. */
+ msm_host->err_work_state = 0;
+
+ /* enable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+}
+
+static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
+ /* An extra write of 0 is needed to clear the error bits */
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
+ msm_host->err_work_state |= DSI_ERR_STATE_ACK;
+ }
+}
+
+static void dsi_timeout_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
+ }
+}
+
+static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
+ }
+}
+
+static void dsi_fifo_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
+
+ /* fifo underflow, overflow */
+ if (status) {
+ dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
+ if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
+ }
+}
+
+static void dsi_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
+ dsi_write(msm_host, REG_DSI_STATUS0, status);
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
+ }
+}
+
+static void dsi_clk_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
+
+ if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
+ dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
+ }
+}
+
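+/* Latch all error status registers into err_work_state and defer the
+ * handling to the error worker, which re-enables the error interrupt
+ * once it is done.
+ */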
+static void dsi_error(struct msm_dsi_host *msm_host)
+{
+ /* disable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
+
+ dsi_clk_status(msm_host);
+ dsi_fifo_status(msm_host);
+ dsi_ack_err_status(msm_host);
+ dsi_timeout_status(msm_host);
+ dsi_status(msm_host);
+ dsi_dln0_phy_err(msm_host);
+
+ queue_work(msm_host->workqueue, &msm_host->err_work);
+}
+
+static irqreturn_t dsi_host_irq(int irq, void *ptr)
+{
+ struct msm_dsi_host *msm_host = ptr;
+ u32 isr;
+ unsigned long flags;
+
+ if (!msm_host->ctrl_base)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+
+ DBG("isr=0x%x, id=%d", isr, msm_host->id);
+
+ if (isr & DSI_IRQ_ERROR)
+ dsi_error(msm_host);
+
+ if (isr & DSI_IRQ_VIDEO_DONE)
+ complete(&msm_host->video_comp);
+
+ if (isr & DSI_IRQ_CMD_DMA_DONE)
+ complete(&msm_host->dma_comp);
+
+ return IRQ_HANDLED;
+}
+
+static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
+ struct device *panel_device)
+{
+ int ret;
+
+ msm_host->disp_en_gpio = devm_gpiod_get(panel_device,
+ "disp-enable");
+ if (IS_ERR(msm_host->disp_en_gpio)) {
+ DBG("cannot get disp-enable-gpios %ld",
+ PTR_ERR(msm_host->disp_en_gpio));
+ msm_host->disp_en_gpio = NULL;
+ }
+ if (msm_host->disp_en_gpio) {
+ ret = gpiod_direction_output(msm_host->disp_en_gpio, 0);
+ if (ret) {
+ pr_err("cannot set dir to disp-en-gpios %d\n", ret);
+ return ret;
+ }
+ }
+
+ msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te");
+ if (IS_ERR(msm_host->te_gpio)) {
+ DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
+ msm_host->te_gpio = NULL;
+ }
+
+ if (msm_host->te_gpio) {
+ ret = gpiod_direction_input(msm_host->te_gpio);
+ if (ret) {
+ pr_err("%s: cannot set dir to disp-te-gpios, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ msm_host->channel = dsi->channel;
+ msm_host->lanes = dsi->lanes;
+ msm_host->format = dsi->format;
+ msm_host->mode_flags = dsi->mode_flags;
+
+ msm_host->panel_node = dsi->dev.of_node;
+
+ /* Some gpios defined in panel DT need to be controlled by host */
+ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
+ if (ret)
+ return ret;
+
+ DBG("id=%d", msm_host->id);
+ if (msm_host->dev)
+ drm_helper_hpd_irq_event(msm_host->dev);
+
+ return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ msm_host->panel_node = NULL;
+
+ DBG("id=%d", msm_host->id);
+ if (msm_host->dev)
+ drm_helper_hpd_irq_event(msm_host->dev);
+
+ return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ if (!msg || !msm_host->power_on)
+ return -EINVAL;
+
+ mutex_lock(&msm_host->cmd_mutex);
+ ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
+ mutex_unlock(&msm_host->cmd_mutex);
+
+ return ret;
+}
+
+static struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+ .transfer = dsi_host_transfer,
+};
+
+int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_host *msm_host = NULL;
+ struct platform_device *pdev = msm_dsi->pdev;
+ int ret;
+
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+ if (!msm_host) {
+ pr_err("%s: FAILED: cannot alloc dsi host\n",
+ __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,dsi-host-index", &msm_host->id);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: host index not specified, ret=%d\n",
+ __func__, ret);
+ goto fail;
+ }
+ msm_host->pdev = pdev;
+
+ ret = dsi_clk_init(msm_host);
+ if (ret) {
+ pr_err("%s: unable to initialize dsi clks\n", __func__);
+ goto fail;
+ }
+
+ msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
+ if (IS_ERR(msm_host->ctrl_base)) {
+ pr_err("%s: unable to map Dsi ctrl base\n", __func__);
+ ret = PTR_ERR(msm_host->ctrl_base);
+ goto fail;
+ }
+
+ msm_host->cfg = dsi_get_config(msm_host);
+ if (!msm_host->cfg) {
+ ret = -EINVAL;
+ pr_err("%s: get config failed\n", __func__);
+ goto fail;
+ }
+
+ ret = dsi_regulator_init(msm_host);
+ if (ret) {
+ pr_err("%s: regulator init failed\n", __func__);
+ goto fail;
+ }
+
+ msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
+ if (!msm_host->rx_buf) {
+ pr_err("%s: alloc rx temp buf failed\n", __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ init_completion(&msm_host->dma_comp);
+ init_completion(&msm_host->video_comp);
+ mutex_init(&msm_host->dev_mutex);
+ mutex_init(&msm_host->cmd_mutex);
+ mutex_init(&msm_host->clk_mutex);
+ spin_lock_init(&msm_host->intr_lock);
+
+ /* setup workqueue */
+ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+ INIT_WORK(&msm_host->err_work, dsi_err_worker);
+
+ msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
+ msm_host->id);
+ if (!msm_dsi->phy) {
+ ret = -EINVAL;
+ pr_err("%s: phy init failed\n", __func__);
+ goto fail;
+ }
+ msm_dsi->host = &msm_host->base;
+ msm_dsi->id = msm_host->id;
+
+ DBG("Dsi Host %d initialized", msm_host->id);
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+ dsi_tx_buf_free(msm_host);
+ if (msm_host->workqueue) {
+ flush_workqueue(msm_host->workqueue);
+ destroy_workqueue(msm_host->workqueue);
+ msm_host->workqueue = NULL;
+ }
+
+ mutex_destroy(&msm_host->clk_mutex);
+ mutex_destroy(&msm_host->cmd_mutex);
+ mutex_destroy(&msm_host->dev_mutex);
+}
+
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct platform_device *pdev = msm_host->pdev;
+ int ret;
+
+ msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!msm_host->irq) {
+ dev_err(dev->dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, msm_host->irq,
+ dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "dsi_isr", msm_host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ msm_host->irq, ret);
+ return ret;
+ }
+
+ msm_host->dev = dev;
+ ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ if (ret) {
+ pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct device_node *node;
+ int ret;
+
+ /* Register mipi dsi host */
+ if (!msm_host->registered) {
+ host->dev = &msm_host->pdev->dev;
+ host->ops = &dsi_host_ops;
+ ret = mipi_dsi_host_register(host);
+ if (ret)
+ return ret;
+
+ msm_host->registered = true;
+
+ /* If the panel driver has not been probed after host register,
+ * we should defer the host's probe.
+ * This makes sure the panel is connected when fbcon detects the
+ * connector status and gets the proper display mode to
+ * create the framebuffer.
+ */
+ if (check_defer) {
+ node = of_get_child_by_name(msm_host->pdev->dev.of_node,
+ "panel");
+ if (node) {
+ if (!of_drm_find_panel(node))
+ return -EPROBE_DEFER;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void msm_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->registered) {
+ mipi_dsi_host_unregister(host);
+ host->dev = NULL;
+ host->ops = NULL;
+ msm_host->registered = false;
+ }
+}
+
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ /* TODO: make sure dsi_cmd_mdp is idle.
+ * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
+ * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
+ * How to handle the old versions? Wait for mdp cmd done?
+ */
+
+ /*
+ * The mdss interrupt is generated in the mdp core clock domain,
+ * so the mdp clock needs to be enabled to receive the dsi interrupt.
+ */
+ dsi_clk_ctrl(msm_host, 1);
+
+ /* TODO: vote for bus bandwidth */
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(0, msm_host);
+
+ msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
+ dsi_write(msm_host, REG_DSI_CTRL,
+ msm_host->dma_cmd_ctrl_restore |
+ DSI_CTRL_CMD_MODE_EN |
+ DSI_CTRL_ENABLE);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
+
+ return 0;
+}
+
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
+ dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(1, msm_host);
+
+ /* TODO: unvote for bus bandwidth */
+
+ dsi_clk_ctrl(msm_host, 0);
+}
+
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return dsi_cmds2buf_tx(msm_host, msg);
+}
+
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int data_byte, rx_byte, dlen, end;
+ int short_response, diff, pkt_size, ret = 0;
+ char cmd;
+ int rlen = msg->rx_len;
+ u8 *buf;
+
+ if (rlen <= 2) {
+ short_response = 1;
+ pkt_size = rlen;
+ rx_byte = 4;
+ } else {
+ short_response = 0;
+ data_byte = 10; /* first read */
+ if (rlen < data_byte)
+ pkt_size = rlen;
+ else
+ pkt_size = data_byte;
+ rx_byte = data_byte + 6; /* 4 header + 2 crc */
+ }
+
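+ /* A read larger than the 16-byte fifo is done in multiple passes:
+ * each pass raises the maximum return packet size, resends the read
+ * command, and drains the fifo into the rx buffer.
+ */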
+ buf = msm_host->rx_buf;
+ end = 0;
+ while (!end) {
+ u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
+ struct mipi_dsi_msg max_pkt_size_msg = {
+ .channel = msg->channel,
+ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+ .tx_len = 2,
+ .tx_buf = tx,
+ };
+
+ DBG("rlen=%d pkt_size=%d rx_byte=%d",
+ rlen, pkt_size, rx_byte);
+
+ ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
+ if (ret < 2) {
+ pr_err("%s: Set max pkt size failed, %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+ (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+ /* Clear the RDBK_DATA registers */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
+ DSI_RDBK_DATA_CTRL_CLR);
+ wmb(); /* make sure the RDBK registers are cleared */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+ wmb(); /* release cleared status before transfer */
+ }
+
+ ret = dsi_cmds2buf_tx(msm_host, msg);
+ if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Once the cmd_dma_done interrupt is received, the return data
+ * from the client is ready and already stored in the RDBK_DATA
+ * registers. Since the rx fifo is 16 bytes, the dcs header is
+ * kept only on the first loop; after that the dcs header is
+ * lost during the shift into the registers.
+ */
+ dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+ if (dlen <= 0)
+ return 0;
+
+ if (short_response)
+ break;
+
+ if (rlen <= data_byte) {
+ diff = data_byte - rlen;
+ end = 1;
+ } else {
+ diff = 0;
+ rlen -= data_byte;
+ }
+
+ if (!end) {
+ dlen -= 2; /* 2 crc */
+ dlen -= diff;
+ buf += dlen; /* next start position */
+ data_byte = 14; /* NOT first read */
+ if (rlen < data_byte)
+ pkt_size += rlen;
+ else
+ pkt_size += data_byte;
+ DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+ }
+ }
+
+ /*
+ * For a single long read, if the requested rlen < 10,
+ * we need to shift the start position of the rx
+ * data buffer to skip the bytes which are not
+ * updated.
+ */
+ if (pkt_size < 10 && !short_response)
+ buf = msm_host->rx_buf + (10 - rlen);
+ else
+ buf = msm_host->rx_buf;
+
+ cmd = buf[0];
+ switch (cmd) {
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
+ ret = 0;
+ break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ ret = dsi_short_read1_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ ret = dsi_short_read2_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ ret = dsi_long_read_resp(buf, msg);
+ break;
+ default:
+ pr_warn("%s:Invalid response cmd\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+ dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+ /* Make sure trigger happens */
+ wmb();
+}
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+ /* TODO: clock should be turned off for command mode,
+ * and only turned on before MDP START.
+ * This part of code should be enabled once mdp driver support it.
+ */
+ /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
+ dsi_clk_ctrl(msm_host, 0); */
+
+ return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+ /* Since we have disabled INTF, the video engine won't stop,
+ * and the cmd engine will be blocked.
+ * Reset to disable the video engine so that we can send off cmds.
+ */
+ dsi_sw_reset(msm_host);
+
+ return 0;
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ u32 clk_pre = 0, clk_post = 0;
+ int ret = 0;
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (msm_host->power_on) {
+ DBG("dsi host already on");
+ goto unlock_ret;
+ }
+
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ goto unlock_ret;
+ }
+
+ ret = dsi_host_regulator_enable(msm_host);
+ if (ret) {
+ pr_err("%s:Failed to enable vregs.ret=%d\n",
+ __func__, ret);
+ goto unlock_ret;
+ }
+
+ ret = dsi_bus_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ dsi_phy_sw_reset(msm_host);
+ ret = msm_dsi_manager_phy_enable(msm_host->id,
+ msm_host->byte_clk_rate * 8,
+ clk_get_rate(msm_host->esc_clk),
+ &clk_pre, &clk_post);
+ dsi_bus_clk_disable(msm_host);
+ if (ret) {
+ pr_err("%s: failed to enable phy, %d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ ret = dsi_clk_ctrl(msm_host, 1);
+ if (ret) {
+ pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ dsi_timing_setup(msm_host);
+ dsi_sw_reset(msm_host);
+ dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 1);
+
+ msm_host->power_on = true;
+ mutex_unlock(&msm_host->dev_mutex);
+
+ return 0;
+
+fail_disable_reg:
+ dsi_host_regulator_disable(msm_host);
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (!msm_host->power_on) {
+ DBG("dsi host already off");
+ goto unlock_ret;
+ }
+
+ dsi_ctrl_config(msm_host, false, 0, 0);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 0);
+
+ msm_dsi_manager_phy_disable(msm_host->id);
+
+ dsi_clk_ctrl(msm_host, 0);
+
+ dsi_host_regulator_disable(msm_host);
+
+ DBG("-");
+
+ msm_host->power_on = false;
+
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ struct drm_display_mode *mode)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->mode) {
+ drm_mode_destroy(msm_host->dev, msm_host->mode);
+ msm_host->mode = NULL;
+ }
+
+ msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+ if (!msm_host->mode) {
+ pr_err("%s: cannot duplicate mode\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+ unsigned long *panel_flags)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_panel *panel;
+
+ panel = of_drm_find_panel(msm_host->panel_node);
+ if (panel_flags)
+ *panel_flags = msm_host->mode_flags;
+
+ return panel;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 000000000000..ee3ebcaa33f5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_kms.h"
+#include "dsi.h"
+
+struct msm_dsi_manager {
+ struct msm_dsi *dsi[DSI_MAX];
+
+ bool is_dual_panel;
+ bool is_sync_needed;
+ int master_panel_id;
+};
+
+static struct msm_dsi_manager msm_dsim_glb;
+
+#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel)
+#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
+#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id)
+
+static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
+{
+ return msm_dsim_glb.dsi[id];
+}
+
+static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
+{
+ return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
+}
+
+static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+ /* We assume the 2 dsi nodes carry the same dual-panel and
+ * sync-mode information, and that only one node specifies the
+ * master in case of dual mode.
+ */
+ if (!msm_dsim->is_dual_panel)
+ msm_dsim->is_dual_panel = of_property_read_bool(
+ np, "qcom,dual-panel-mode");
+
+ if (msm_dsim->is_dual_panel) {
+ if (of_property_read_bool(np, "qcom,master-panel"))
+ msm_dsim->master_panel_id = id;
+ if (!msm_dsim->is_sync_needed)
+ msm_dsim->is_sync_needed = of_property_read_bool(
+ np, "qcom,sync-dual-panel");
+ }
+
+ return 0;
+}
+
+struct dsi_connector {
+ struct drm_connector base;
+ int id;
+};
+
+struct dsi_bridge {
+ struct drm_bridge base;
+ int id;
+};
+
+#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
+#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
+
+static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
+{
+ struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+ return dsi_connector->id;
+}
+
+static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
+{
+ struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
+ return dsi_bridge->id;
+}
+
+static enum drm_connector_status dsi_mgr_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ DBG("id=%d", id);
+ if (!msm_dsi->panel) {
+ msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
+ &msm_dsi->panel_flags);
+
+ /* There is only 1 panel in the global panel list
+ * for dual panel mode. Therefore the slave dsi should get
+ * the drm_panel instance from the master dsi, and
+ * keep using the panel flags obtained from the current DSI link.
+ */
+ if (!msm_dsi->panel && IS_DUAL_PANEL() &&
+ !IS_MASTER_PANEL(id) && other_dsi)
+ msm_dsi->panel = msm_dsi_host_get_panel(
+ other_dsi->host, NULL);
+
+ if (msm_dsi->panel && IS_DUAL_PANEL())
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.tile_property, 0);
+
+ /* Set split display info to kms once dual panel is connected
+ * to both hosts
+ */
+ if (msm_dsi->panel && IS_DUAL_PANEL() &&
+ other_dsi && other_dsi->panel) {
+ bool cmd_mode = !(msm_dsi->panel_flags &
+ MIPI_DSI_MODE_VIDEO);
+ struct drm_encoder *encoder = msm_dsi_get_encoder(
+ dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
+ struct drm_encoder *slave_enc = msm_dsi_get_encoder(
+ dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));
+
+ if (kms->funcs->set_split_display)
+ kms->funcs->set_split_display(kms, encoder,
+ slave_enc, cmd_mode);
+ else
+ pr_err("mdp does not support dual panel\n");
+ }
+ }
+
+ return msm_dsi->panel ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void dsi_mgr_connector_destroy(struct drm_connector *connector)
+{
+ DBG("");
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
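+/* Each DSI link drives half of a dual panel, so halve the clock and
+ * horizontal timings of the modes reported by the panel to get the
+ * per-link mode.
+ */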
+static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode, *m;
+
+ /* Only support left-right mode */
+ list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
+ mode->clock >>= 1;
+ mode->hdisplay >>= 1;
+ mode->hsync_start >>= 1;
+ mode->hsync_end >>= 1;
+ mode->htotal >>= 1;
+ drm_mode_set_name(mode);
+ }
+}
+
+static int dsi_dual_connector_tile_init(
+ struct drm_connector *connector, int id)
+{
+ struct drm_display_mode *mode;
+ /* Fake topology id */
+ char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
+
+ if (connector->tile_group) {
+ DBG("Tile property has been initialized");
+ return 0;
+ }
+
+ /* Use the first mode only for now */
+ if (list_empty(&connector->probed_modes))
+ return -EINVAL;
+ mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode,
+ head);
+
+ connector->tile_group = drm_mode_get_tile_group(
+ connector->dev, topo_id);
+ if (!connector->tile_group)
+ connector->tile_group = drm_mode_create_tile_group(
+ connector->dev, topo_id);
+ if (!connector->tile_group) {
+ pr_err("%s: failed to create tile group\n", __func__);
+ return -ENOMEM;
+ }
+
+ connector->has_tile = true;
+ connector->tile_is_single_monitor = true;
+
+ /* mode has been fixed */
+ connector->tile_h_size = mode->hdisplay;
+ connector->tile_v_size = mode->vdisplay;
+
+ /* Only support left-right mode */
+ connector->num_h_tile = 2;
+ connector->num_v_tile = 1;
+
+ connector->tile_v_loc = 0;
+ connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
+
+ return 0;
+}
+
+static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ int ret, num;
+
+ if (!panel)
+ return 0;
+
+ /* Since we have 2 connectors but only 1 drm_panel in dual DSI mode,
+ * the panel is not permanently attached to any connector.
+ * Temporarily attach the panel to the current connector here,
+ * only to let the panel set its modes on this connector.
+ */
+ drm_panel_attach(panel, connector);
+ num = drm_panel_get_modes(panel);
+ drm_panel_detach(panel);
+ if (!num)
+ return 0;
+
+ if (IS_DUAL_PANEL()) {
+ /* report half resolution to user */
+ dsi_dual_connector_fix_modes(connector);
+ ret = dsi_dual_connector_tile_init(connector, id);
+ if (ret)
+ return ret;
+ ret = drm_mode_connector_set_tile_property(connector);
+ if (ret) {
+ pr_err("%s: set tile property failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return num;
+}
+
+static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ DBG("");
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms, requested, encoder);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+dsi_mgr_connector_best_encoder(struct drm_connector *connector)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+ DBG("");
+ return msm_dsi_get_encoder(msm_dsi);
+}
+
+static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_panel = IS_DUAL_PANEL();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!panel || (is_dual_panel && (DSI_1 == id)))
+ return;
+
+ ret = msm_dsi_host_power_on(host);
+ if (ret) {
+ pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
+ goto host_on_fail;
+ }
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_power_on(msm_dsi1->host);
+ if (ret) {
+ pr_err("%s: power on host1 failed, %d\n",
+ __func__, ret);
+ goto host1_on_fail;
+ }
+ }
+
+ /* Always call panel functions once, because even for dual panels,
+ * there is only one drm_panel instance.
+ */
+ ret = drm_panel_prepare(panel);
+ if (ret) {
+ pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
+ goto panel_prep_fail;
+ }
+
+ ret = msm_dsi_host_enable(host);
+ if (ret) {
+ pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
+ goto host_en_fail;
+ }
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_enable(msm_dsi1->host);
+ if (ret) {
+ pr_err("%s: enable host1 failed, %d\n", __func__, ret);
+ goto host1_en_fail;
+ }
+ }
+
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
+ goto panel_en_fail;
+ }
+
+ return;
+
+panel_en_fail:
+ if (is_dual_panel && msm_dsi1)
+ msm_dsi_host_disable(msm_dsi1->host);
+host1_en_fail:
+ msm_dsi_host_disable(host);
+host_en_fail:
+ drm_panel_unprepare(panel);
+panel_prep_fail:
+ if (is_dual_panel && msm_dsi1)
+ msm_dsi_host_power_off(msm_dsi1->host);
+host1_on_fail:
+ msm_dsi_host_power_off(host);
+host_on_fail:
+ return;
+}
+
+static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_panel = IS_DUAL_PANEL();
+ int ret;
+
+ DBG("id=%d", id);
+
+ if (!panel || (is_dual_panel && (DSI_1 == id)))
+ return;
+
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+
+ ret = msm_dsi_host_disable(host);
+ if (ret)
+ pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_disable(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 disable failed, %d\n", __func__, ret);
+ }
+
+ ret = drm_panel_unprepare(panel);
+ if (ret)
+ pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+
+ ret = msm_dsi_host_power_off(host);
+ if (ret)
+ pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_power_off(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 power off failed, %d\n",
+ __func__, ret);
+ }
+}
+
+static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_dual_panel = IS_DUAL_PANEL();
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ if (is_dual_panel && (DSI_1 == id))
+ return;
+
+ msm_dsi_host_set_display_mode(host, adjusted_mode);
+ if (is_dual_panel && other_dsi)
+ msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
+}
+
+static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = dsi_mgr_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = dsi_mgr_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
+ .get_modes = dsi_mgr_connector_get_modes,
+ .mode_valid = dsi_mgr_connector_mode_valid,
+ .best_encoder = dsi_mgr_connector_best_encoder,
+};
+
+static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+ .pre_enable = dsi_mgr_bridge_pre_enable,
+ .enable = dsi_mgr_bridge_enable,
+ .disable = dsi_mgr_bridge_disable,
+ .post_disable = dsi_mgr_bridge_post_disable,
+ .mode_set = dsi_mgr_bridge_mode_set,
+};
+
+/* initialize connector */
+struct drm_connector *msm_dsi_manager_connector_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_connector *connector = NULL;
+ struct dsi_connector *dsi_connector;
+ int ret;
+
+ dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
+ sizeof(*dsi_connector), GFP_KERNEL);
+ if (!dsi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dsi_connector->id = id;
+
+ connector = &dsi_connector->base;
+
+ ret = drm_connector_init(msm_dsi->dev, connector,
+ &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (ret)
+ goto fail;
+
+ drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
+
+ /* Enable HPD so that the hpd event is handled
+ * when the panel is attached to the host.
+ */
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ /* Display driver doesn't support interlace now. */
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_connector_register(connector);
+ if (ret)
+ goto fail;
+
+ return connector;
+
+fail:
+ if (connector)
+ dsi_mgr_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
+
+/* initialize bridge */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_bridge *bridge = NULL;
+ struct dsi_bridge *dsi_bridge;
+ int ret;
+
+ dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
+ sizeof(*dsi_bridge), GFP_KERNEL);
+ if (!dsi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dsi_bridge->id = id;
+
+ bridge = &dsi_bridge->base;
+ bridge->funcs = &dsi_mgr_bridge_funcs;
+
+ ret = drm_bridge_attach(msm_dsi->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ msm_dsi_manager_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
+
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+int msm_dsi_manager_phy_enable(int id,
+ const unsigned long bit_rate, const unsigned long esc_rate,
+ u32 *clk_pre, u32 *clk_post)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi_phy *phy = msm_dsi->phy;
+ int ret;
+
+ ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+ if (ret)
+ return ret;
+
+ msm_dsi->phy_enabled = true;
+ msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
+
+ return 0;
+}
+
+void msm_dsi_manager_phy_disable(int id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+ struct msm_dsi_phy *phy = msm_dsi->phy;
+
+ /* disable DSI phy
+ * In dual-dsi configuration, the phy should be disabled for the
+ * first controller only when the second controller is disabled.
+ */
+ msm_dsi->phy_enabled = false;
+ if (IS_DUAL_PANEL() && mdsi && sdsi) {
+ if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+ msm_dsi_phy_disable(sdsi->phy);
+ msm_dsi_phy_disable(mdsi->phy);
+ }
+ } else {
+ msm_dsi_phy_disable(phy);
+ }
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_read = (msg->rx_buf && msg->rx_len);
+ bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+ int ret;
+
+ if (!msg->tx_buf || !msg->tx_len)
+ return 0;
+
+ /* In the dual master case, the panel requires the same commands to be
+ * sent to both DSI links. The host issues the command trigger to both
+ * links when DSI_1 calls the cmd transfer function, no matter whether
+ * that happens before or after the DSI_0 cmd transfer.
+ */
+ if (need_sync && (id == DSI_0))
+ return is_read ? msg->rx_len : msg->tx_len;
+
+ if (need_sync && msm_dsi0) {
+ ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare non-trigger host, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ ret = msm_dsi_host_xfer_prepare(host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+ goto restore_host0;
+ }
+
+ ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
+ msm_dsi_host_cmd_tx(host, msg);
+
+ msm_dsi_host_xfer_restore(host, msg);
+
+restore_host0:
+ if (need_sync && msm_dsi0)
+ msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
+
+ return ret;
+}
+
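+/* Returns true only when the transfer is actually triggered here: in sync
+ * mode DSI_0 defers to DSI_1, which then commits the transfer on both
+ * hosts back to back.
+ */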
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+
+ if (IS_SYNC_NEEDED() && (id == DSI_0))
+ return false;
+
+ if (IS_SYNC_NEEDED() && msm_dsi0)
+ msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
+
+ msm_dsi_host_cmd_xfer_commit(host, iova, len);
+
+ return true;
+}
+
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+ int id = msm_dsi->id;
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ int ret;
+
+ if (id >= DSI_MAX) {
+ pr_err("%s: invalid id %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (msm_dsim->dsi[id]) {
+ pr_err("%s: dsi%d already registered\n", __func__, id);
+ return -EBUSY;
+ }
+
+ msm_dsim->dsi[id] = msm_dsi;
+
+ ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+ if (ret) {
+ pr_err("%s: failed to parse dual panel info\n", __func__);
+ return ret;
+ }
+
+ if (!IS_DUAL_PANEL()) {
+ ret = msm_dsi_host_register(msm_dsi->host, true);
+ } else if (!other_dsi) {
+ return 0;
+ } else {
+ struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+ msm_dsi : other_dsi;
+ struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+ other_dsi : msm_dsi;
+ /* Register the slave host first, so that the slave DSI device
+ * has a chance to probe without blocking the master DSI
+ * device's probe.
+ * Also, do not check probe deferral for the slave host, because
+ * only the master DSI device adds the panel to the global panel
+ * list; the panel's device is the master DSI device.
+ */
+ ret = msm_dsi_host_register(sdsi->host, false);
+ if (ret)
+ return ret;
+ ret = msm_dsi_host_register(mdsi->host, true);
+ }
+
+ return ret;
+}
+
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+ if (msm_dsi->host)
+ msm_dsi_host_unregister(msm_dsi->host);
+ msm_dsim->dsi[msm_dsi->id] = NULL;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
new file mode 100644
index 000000000000..f0cea8927388
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct dsi_dphy_timing {
+ u32 clk_pre;
+ u32 clk_post;
+ u32 clk_zero;
+ u32 clk_trail;
+ u32 clk_prepare;
+ u32 hs_exit;
+ u32 hs_zero;
+ u32 hs_prepare;
+ u32 hs_trail;
+ u32 hs_rqst;
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+};
+
+struct msm_dsi_phy {
+ void __iomem *base;
+ void __iomem *reg_base;
+ int id;
+ struct dsi_dphy_timing timing;
+ int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+ int (*disable)(struct msm_dsi_phy *phy);
+};
+
+#define S_DIV_ROUND_UP(n, d) \
+ (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
+
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+ s32 min_result, bool even)
+{
+ s32 v;
+ v = (tmax - tmin) * percent;
+ v = S_DIV_ROUND_UP(v, 100) + tmin;
+ if (even && (v & 0x1))
+ return max_t(s32, min_result, v - 1);
+ else
+ return max_t(s32, min_result, v);
+}
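
Both helpers above are small enough to check in userspace; a self-contained
re-implementation (illustrative, not part of the patch) showing the rounding
behaviour:

#include <stdio.h>

#define S_DIV_ROUND_UP(n, d) \
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

static int linear_inter(int tmax, int tmin, int percent, int min_result, int even)
{
	int v = S_DIV_ROUND_UP((tmax - tmin) * percent, 100) + tmin;

	if (even && (v & 1))
		v--;
	return v > min_result ? v : min_result;
}

int main(void)
{
	/* 10% of the way from 38 to 95, forced even: ceil(570/100) + 38 = 44 */
	printf("%d\n", linear_inter(95, 38, 10, 0, 1));
	/* the signed "round up" rounds away from zero: -7/2 -> -4, not -3 */
	printf("%d\n", S_DIV_ROUND_UP(-7, 2));
	return 0;
}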
+
+static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
+ s32 ui, s32 coeff, s32 pcnt)
+{
+ s32 tmax, tmin, clk_z;
+ s32 temp;
+
+ /* reset */
+ temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ if (tmin > 255) {
+ tmax = 511;
+ clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+ } else {
+ tmax = 255;
+ clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+ }
+
+ /* adjust */
+ temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+ timing->clk_zero = clk_z + 8 - temp;
+}
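
Read literally, the "adjust" step pads the three-field sum up to the next
multiple of 8. A tiny standalone check of that reading (values illustrative):

#include <stdio.h>

/* Mirrors the adjust step above: bump clk_zero so that
 * hs_rqst + clk_prepare + clk_zero lands on the next multiple of 8. */
static int align_clk_zero(int hs_rqst, int clk_prepare, int clk_z)
{
	int rem = (hs_rqst + clk_prepare + clk_z) & 0x7;

	return clk_z + 8 - rem;
}

int main(void)
{
	int clk_zero = align_clk_zero(5, 6, 40);	/* sum 51 -> pad to 56 */

	printf("clk_zero=%d total=%d\n", clk_zero, 5 + 6 + clk_zero);
	return 0;
}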
+
+static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ s32 ui, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 10;
+ s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+ tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+ tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+ temp = lpx / ui;
+ if (temp & 0x1)
+ timing->hs_rqst = temp;
+ else
+ timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+ /* Calculate clk_zero after clk_prepare and hs_rqst */
+ dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ temp = 85 * coeff + 6 * ui;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 40 * coeff + 4 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+ tmax = 255;
+ temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+ temp = 145 * coeff + 10 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 60 * coeff + 4 * ui;
+ tmin = DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ tmax = 255;
+ tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+ tmax = 63;
+ temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+ temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ tmax = 63;
+ temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+ temp += 8 * ui + lpx;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ if (tmin > tmax) {
+ temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
+ timing->clk_pre = temp >> 1;
+ temp = (2 * tmax - tmin) * pcnt2;
+ } else {
+ timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->clk_pre, timing->clk_post, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst);
+
+ return 0;
+}
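
To get a feel for the fixed-point units: with coeff = 1000, ui and lpx come
out in picoseconds. A standalone computation for a plausible 800 Mbit/s lane
with the usual 19.2 MHz escape clock (mult_frac is re-declared locally; the
rates are illustrative, not from the patch):

#include <stdio.h>

#define NSEC_PER_MSEC	1000000L

/* mult_frac(x, n, d) without intermediate overflow, as in the kernel */
#define mult_frac(x, n, d) (((x) / (d)) * (n) + (((x) % (d)) * (n)) / (d))

int main(void)
{
	long coeff = 1000;	/* same fixed-point scale as the code above */
	unsigned long bit_rate = 800000000UL, esc_rate = 19200000UL;

	/* ui/lpx come out in ns * coeff, i.e. picoseconds here */
	long ui  = mult_frac(NSEC_PER_MSEC, coeff, (long)(bit_rate / 1000));
	long lpx = mult_frac(NSEC_PER_MSEC, coeff, (long)(esc_rate / 1000));

	printf("ui  = %ld ps\n", ui);	/* 1250 ps for an 800 Mbit/s lane */
	printf("lpx = %ld ps\n", lpx);	/* ~52083 ps for 19.2 MHz escape */
	return 0;
}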
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ pr_err("%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+ dsi_28nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ }
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
+ else
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
+
+ return 0;
+}
+
+static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+ dsi_28nm_phy_regulator_ctrl(phy, false);
+
+ /*
+ * Wait for the register writes to complete in order to
+ * ensure that the PHY is completely disabled.
+ */
+ wmb();
+
+ return 0;
+}
+
+#define dsi_phy_func_init(name) \
+ do { \
+ phy->enable = dsi_##name##_phy_enable; \
+ phy->disable = dsi_##name##_phy_disable; \
+ } while (0)
+
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
+{
+ struct msm_dsi_phy *phy;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(phy->base)) {
+ pr_err("%s: failed to map phy base\n", __func__);
+ return NULL;
+ }
+ phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+ if (IS_ERR_OR_NULL(phy->reg_base)) {
+ pr_err("%s: failed to map phy regulator base\n", __func__);
+ return NULL;
+ }
+
+ switch (type) {
+ case MSM_DSI_PHY_28NM:
+ dsi_phy_func_init(28nm);
+ break;
+ default:
+ pr_err("%s: unsupported type, %d\n", __func__, type);
+ return NULL;
+ }
+
+ phy->id = id;
+
+ return phy;
+}
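
A hedged usage sketch of the API this file exports, with error handling
trimmed (the wrapper function name is made up; the calls and their
signatures are the ones defined in this patch, with "dsi.h" in scope):

/* Illustrative only: drive the ops table dsi_phy_func_init() fills in. */
static int example_phy_bringup(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	u32 clk_pre, clk_post;
	int ret;

	phy = msm_dsi_phy_init(pdev, MSM_DSI_PHY_28NM, 0);
	if (!phy)
		return -ENODEV;

	/* single panel, 800 Mbit/s per lane, 19.2 MHz escape clock */
	ret = msm_dsi_phy_enable(phy, false, 800000000, 19200000);
	if (ret)
		return ret;

	/* clk_pre/clk_post feed the host controller's timing setup */
	msm_dsi_phy_get_clk_pre_post(phy, &clk_pre, &clk_post);

	return msm_dsi_phy_disable(phy);
}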
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ if (!phy || !phy->enable)
+ return -EINVAL;
+ return phy->enable(phy, is_dual_panel, bit_rate, esc_rate);
+}
+
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+ if (!phy || !phy->disable)
+ return -EINVAL;
+ return phy->disable(phy);
+}
+
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+ u32 *clk_pre, u32 *clk_post)
+{
+ if (!phy)
+ return;
+ if (clk_pre)
+ *clk_pre = phy->timing.clk_pre;
+ if (clk_post)
+ *clk_post = phy->timing.clk_post;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index eeed006eed13..6997ec636c6d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -53,6 +53,23 @@ struct pll_rate {
/* NOTE: keep sorted highest freq to lowest: */
static const struct pll_rate freqtbl[] = {
+ { 154000000, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
/* 1080p60/1080p50 case */
{ 148500000, {
{ 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
@@ -112,6 +129,23 @@ static const struct pll_rate freqtbl[] = {
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0, 0 } }
},
+ { 74176000, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
{ 65000000, {
{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index cde25009203a..dbc068988377 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -83,7 +83,8 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
};
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
@@ -93,7 +94,8 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c276624290af..b9a4ded6e400 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,9 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 29312 bytes, from 2015-03-23 21:18:48)
- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-03-23 20:38:49)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -37,11 +37,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum mdp5_intf {
+enum mdp5_intf_type {
+ INTF_DISABLED = 0,
INTF_DSI = 1,
INTF_HDMI = 3,
INTF_LCDC = 5,
INTF_eDP = 9,
+ INTF_VIRTUAL = 100,
+ INTF_WB = 101,
};
enum mdp5_intfnum {
@@ -67,11 +70,11 @@ enum mdp5_pipe {
enum mdp5_ctl_mode {
MODE_NONE = 0,
- MODE_ROT0 = 1,
- MODE_ROT1 = 2,
- MODE_WB0 = 3,
- MODE_WB1 = 4,
- MODE_WFD = 5,
+ MODE_WB_0_BLOCK = 1,
+ MODE_WB_1_BLOCK = 2,
+ MODE_WB_0_LINE = 3,
+ MODE_WB_1_LINE = 4,
+ MODE_WB_2_LINE = 5,
};
enum mdp5_pack_3d {
@@ -94,33 +97,6 @@ enum mdp5_pipe_bwc {
BWC_Q_MED = 2,
};
-enum mdp5_client_id {
- CID_UNUSED = 0,
- CID_VIG0_Y = 1,
- CID_VIG0_CR = 2,
- CID_VIG0_CB = 3,
- CID_VIG1_Y = 4,
- CID_VIG1_CR = 5,
- CID_VIG1_CB = 6,
- CID_VIG2_Y = 7,
- CID_VIG2_CR = 8,
- CID_VIG2_CB = 9,
- CID_DMA0_Y = 10,
- CID_DMA0_CR = 11,
- CID_DMA0_CB = 12,
- CID_DMA1_Y = 13,
- CID_DMA1_CR = 14,
- CID_DMA1_CB = 15,
- CID_RGB0 = 16,
- CID_RGB1 = 17,
- CID_RGB2 = 18,
- CID_VIG3_Y = 19,
- CID_VIG3_CR = 20,
- CID_VIG3_CB = 21,
- CID_RGB3 = 22,
- CID_MAX = 23,
-};
-
enum mdp5_cursor_format {
CURSOR_FMT_ARGB8888 = 0,
CURSOR_FMT_ARGB1555 = 2,
@@ -144,30 +120,25 @@ enum mdp5_data_format {
DATA_FORMAT_YUV = 1,
};
-#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
-#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
-#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
-#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008
-#define MDP5_IRQ_INTF0_WB_WFD 0x00000010
-#define MDP5_IRQ_INTF1_WB_WFD 0x00000020
-#define MDP5_IRQ_INTF2_WB_WFD 0x00000040
-#define MDP5_IRQ_INTF3_WB_WFD 0x00000080
-#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100
-#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200
-#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400
-#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800
-#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000
-#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000
-#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000
-#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000
-#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000
-#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000
-#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000
-#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
-#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
-#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
-#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
-#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
+#define MDP5_IRQ_WB_0_DONE 0x00000001
+#define MDP5_IRQ_WB_1_DONE 0x00000002
+#define MDP5_IRQ_WB_2_DONE 0x00000010
+#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100
+#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200
+#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400
+#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800
+#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000
+#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000
+#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000
+#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000
+#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000
+#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000
+#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000
+#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000
+#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000
+#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000
+#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000
+#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000
#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
#define MDP5_IRQ_INTF0_VSYNC 0x02000000
#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
@@ -176,136 +147,186 @@ enum mdp5_data_format {
#define MDP5_IRQ_INTF2_VSYNC 0x20000000
#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
#define MDP5_IRQ_INTF3_VSYNC 0x80000000
-#define REG_MDP5_HW_VERSION 0x00000000
+#define REG_MDSS_HW_VERSION 0x00000000
+#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDSS_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
+}
+#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDSS_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
+}
+#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDSS_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
+}
+
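The generated inline helpers above only pack fields; decoding a value read
back from the register is the mirror image. A standalone decode sketch (the
masks are copied from above; the sample register value is made up):

#include <stdint.h>
#include <stdio.h>

#define MDSS_HW_VERSION_STEP__MASK	0x0000ffff
#define MDSS_HW_VERSION_MINOR__MASK	0x0fff0000
#define MDSS_HW_VERSION_MINOR__SHIFT	16
#define MDSS_HW_VERSION_MAJOR__MASK	0xf0000000
#define MDSS_HW_VERSION_MAJOR__SHIFT	28

int main(void)
{
	uint32_t v = 0x10030000;	/* hypothetical readback: v1.3.0 */

	printf("major=%u minor=%u step=%u\n",
	       (unsigned)((v & MDSS_HW_VERSION_MAJOR__MASK) >> MDSS_HW_VERSION_MAJOR__SHIFT),
	       (unsigned)((v & MDSS_HW_VERSION_MINOR__MASK) >> MDSS_HW_VERSION_MINOR__SHIFT),
	       (unsigned)(v & MDSS_HW_VERSION_STEP__MASK));
	return 0;
}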
+#define REG_MDSS_HW_INTR_STATUS 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
-#define REG_MDP5_HW_INTR_STATUS 0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001
-#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020
-#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100
-#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000
+static inline uint32_t __offset_MDP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->mdp.base[0]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-#define REG_MDP5_MDP_VERSION 0x00000100
-#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000
-#define MDP5_MDP_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
+#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
{
- return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+ return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
}
-#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_MDP_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
{
- return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+ return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
}
-#define REG_MDP5_DISP_INTF_SEL 0x00000104
-#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
}
-#define REG_MDP5_INTR_EN 0x00000110
+static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
-#define REG_MDP5_INTR_STATUS 0x00000114
+static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
-#define REG_MDP5_INTR_CLEAR 0x00000118
+static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_EN 0x0000011c
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_STATUS 0x00000120
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_CLEAR 0x00000124
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
-static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
+#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
-static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
}
-static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
}
static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
{
switch (idx) {
- case IGC_VIG: return 0x00000300;
- case IGC_RGB: return 0x00000310;
- case IGC_DMA: return 0x00000320;
- case IGC_DSPP: return 0x00000400;
+ case IGC_VIG: return 0x00000200;
+ case IGC_RGB: return 0x00000210;
+ case IGC_DMA: return 0x00000220;
+ case IGC_DSPP: return 0x00000300;
default: return INVALID_IDX(idx);
}
}
-static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
-static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
-static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
-#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
-static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
{
- return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+ return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
}
-#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+#define REG_MDP5_SPLIT_DPL_EN 0x000003f4
+
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000003f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000004f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
static inline uint32_t __offset_CTL(uint32_t idx)
{
@@ -437,11 +458,19 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __o
#define MDP5_CTL_FLUSH_DSPP0 0x00002000
#define MDP5_CTL_FLUSH_DSPP1 0x00004000
#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_WB 0x00010000
#define MDP5_CTL_FLUSH_CTL 0x00020000
#define MDP5_CTL_FLUSH_VIG3 0x00040000
#define MDP5_CTL_FLUSH_RGB3 0x00080000
#define MDP5_CTL_FLUSH_LM5 0x00100000
#define MDP5_CTL_FLUSH_DSPP3 0x00200000
+#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
+#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
+#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
+#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
+#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
+#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
@@ -1117,6 +1146,94 @@ static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc
static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
+static inline uint32_t __offset_PP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->pp.base[0]);
+ case 1: return (mdp5_cfg->pp.base[1]);
+ case 2: return (mdp5_cfg->pp.base[2]);
+ case 3: return (mdp5_cfg->pp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
+#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
+}
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
+#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
+}
+#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
+#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
+
static inline uint32_t __offset_INTF(uint32_t idx)
{
switch (idx) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index b0a44310cf2a..e001e6b2296a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,23 @@ const struct mdp5_cfg_hw *mdp5_cfg = NULL;
const struct mdp5_cfg_hw msm8x74_config = {
.name = "msm8x74",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
.smp = {
.mmb_count = 22,
.mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
@@ -57,27 +67,49 @@ const struct mdp5_cfg_hw msm8x74_config = {
.count = 2,
.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
},
+ .pp = {
+ .count = 3,
+ .base = { 0x12d00, 0x12e00, 0x12f00 },
+ },
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
+ .intfs = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
.max_clk = 200000000,
};
const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
.smp = {
.mmb_count = 44,
.mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
.reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
- .reserved[CID_RGB0] = 2,
- .reserved[CID_RGB1] = 2,
- .reserved[CID_RGB2] = 2,
- .reserved[CID_RGB3] = 2,
+ .reserved = {
+ /* Two SMP blocks are statically tied to RGB pipes: */
+ [16] = 2, [17] = 2, [18] = 2, [22] = 2,
+ },
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x003fffff,
},
.pipe_vig = {
.count = 4,
@@ -105,10 +137,69 @@ const struct mdp5_cfg_hw apq8084_config = {
.count = 3,
.base = { 0x13500, 0x13700, 0x13900 },
},
+ .pp = {
+ .count = 4,
+ .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+ },
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
+ .intfs = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ .max_clk = 320000000,
+};
+
+const struct mdp5_cfg_hw msm8x16_config = {
+ .name = "msm8x16",
+ .mdp = {
+ .count = 1,
+ .base = { 0x01000 },
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x05000 },
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x15000, 0x17000 },
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x25000 },
+ },
+ .lm = {
+ .count = 2, /* LM0 and LM3 */
+ .base = { 0x45000, 0x48000 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x55000 },
+ },
+ .intf = {
+ .count = 1, /* INTF_1 */
+ .base = { 0x6B800 },
+ },
+ /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
.max_clk = 320000000,
};
@@ -116,6 +207,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74_config } },
{ .revision = 2, .config = { .hw = &msm8x74_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
+ { .revision = 6, .config = { .hw = &msm8x16_config } },
};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index dba4d52cceeb..3a551b0892d8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -44,26 +44,38 @@ struct mdp5_lm_block {
uint32_t nb_stages; /* number of stages per blender */
};
+struct mdp5_ctl_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t flush_hw_mask; /* FLUSH register's hardware mask */
+};
+
struct mdp5_smp_block {
int mmb_count; /* number of SMP MMBs */
int mmb_size; /* MMB: size in bytes */
+ uint32_t clients[MAX_CLIENTS]; /* SMP port allocation per pipe */
mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
};
+#define MDP5_INTF_NUM_MAX 5
+
struct mdp5_cfg_hw {
char *name;
+ struct mdp5_sub_block mdp;
struct mdp5_smp_block smp;
- struct mdp5_sub_block ctl;
+ struct mdp5_ctl_block ctl;
struct mdp5_sub_block pipe_vig;
struct mdp5_sub_block pipe_rgb;
struct mdp5_sub_block pipe_dma;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
+ struct mdp5_sub_block pp;
struct mdp5_sub_block intf;
+ u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+
uint32_t max_clk;
};
@@ -84,6 +96,10 @@ const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hn
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+#define mdp5_cfg_intf_is_virtual(intf_type) ({ \
+ typeof(intf_type) __val = (intf_type); \
+ (__val) >= INTF_VIRTUAL ? true : false; })
+
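The macro above uses a GCC statement expression so that its argument is
evaluated exactly once, even if it has side effects. A standalone check
against the interface types this patch adds to mdp5.xml.h:

#include <stdbool.h>
#include <stdio.h>

enum mdp5_intf_type { INTF_HDMI = 3, INTF_VIRTUAL = 100, INTF_WB = 101 };

/* Same shape as the header macro above (GNU C statement expression) */
#define mdp5_cfg_intf_is_virtual(intf_type) ({ \
	typeof(intf_type) __val = (intf_type); \
	(__val) >= INTF_VIRTUAL ? true : false; })

int main(void)
{
	printf("HDMI virtual? %d\n", mdp5_cfg_intf_is_virtual(INTF_HDMI));
	printf("WB virtual?   %d\n", mdp5_cfg_intf_is_virtual(INTF_WB));
	return 0;
}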
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
new file mode 100644
index 000000000000..e4e89567f51d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_cmd_encoder {
+ struct drm_encoder base;
+ struct mdp5_interface intf;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[0],
+}, {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
+{
+ mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
+ &mdp_bus_scale_table);
+ DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
+}
+
+static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
+{
+ if (mdp5_cmd_enc->bsc) {
+ msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
+ mdp5_cmd_enc->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
+{
+ if (mdp5_cmd_enc->bsc) {
+ DBG("set bus scaling: %d", idx);
+ /* HACK: scaling down, and then immediately back up
+ * seems to leave things broken (underflow).. so
+ * never disable:
+ */
+ idx = 1;
+ msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
+static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
+static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {}
+#endif
+
+#define VSYNC_CLK_RATE 19200000
+static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct device *dev = encoder->dev->dev;
+ u32 total_lines_x100, vclks_line, cfg;
+ long vsync_clk_speed;
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+
+ if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
+ dev_err(dev, "vsync_clk is not initialized\n");
+ return -EINVAL;
+ }
+
+ total_lines_x100 = mode->vtotal * mode->vrefresh;
+ if (!total_lines_x100) {
+ dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ __func__, mode->vtotal, mode->vrefresh);
+ return -EINVAL;
+ }
+
+ vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
+ if (vsync_clk_speed <= 0) {
+ dev_err(dev, "vsync_clk round rate failed %ld\n",
+ vsync_clk_speed);
+ return -EINVAL;
+ }
+ vclks_line = vsync_clk_speed * 100 / total_lines_x100;
+
+ cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
+ | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
+ cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
+ MDP5_PP_SYNC_THRESH_START(4) |
+ MDP5_PP_SYNC_THRESH_CONTINUE(4));
+
+ return 0;
+}
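
Plugging a representative mode into the arithmetic above (the numbers are
illustrative; this mirrors the computation only and says nothing about what
the hardware expects of the field):

#include <stdio.h>

int main(void)
{
	long vsync_clk = 19200000;		/* VSYNC_CLK_RATE after rounding */
	long vtotal = 1125, vrefresh = 60;	/* a 1080p60-like mode */

	long total_lines_x100 = vtotal * vrefresh;
	long vclks_line = vsync_clk * 100 / total_lines_x100;

	/* 19200000 * 100 / 67500 = 28444, the VSYNC_COUNT value written */
	printf("vclks_line = %ld\n", vclks_line);
	return 0;
}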
+
+static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ int ret;
+
+ ret = clk_set_rate(mdp5_kms->vsync_clk,
+ clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
+ if (ret) {
+ dev_err(encoder->dev->dev,
+ "vsync_clk clk_set_rate failed, %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(mdp5_kms->vsync_clk);
+ if (ret) {
+ dev_err(encoder->dev->dev,
+ "vsync_clk clk_prepare_enable failed, %d\n", ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1);
+
+ return 0;
+}
+
+static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
+ clk_disable_unprepare(mdp5_kms->vsync_clk);
+}
+
+static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ bs_fini(mdp5_cmd_enc);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_cmd_enc);
+}
+
+static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
+ .destroy = mdp5_cmd_encoder_destroy,
+};
+
+static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+ pingpong_tearcheck_setup(encoder, mode);
+ mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+}
+
+static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+ int lm = mdp5_crtc_get_lm(encoder->crtc);
+
+ if (WARN_ON(!mdp5_cmd_enc->enabled))
+ return;
+
+ /* Wait for the last frame done */
+ mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm));
+ pingpong_tearcheck_disable(encoder);
+
+ mdp5_ctl_set_encoder_state(ctl, false);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ bs_set(mdp5_cmd_enc, 0);
+
+ mdp5_cmd_enc->enabled = false;
+}
+
+static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+
+ if (WARN_ON(mdp5_cmd_enc->enabled))
+ return;
+
+ bs_set(mdp5_cmd_enc, 1);
+ if (pingpong_tearcheck_enable(encoder))
+ return;
+
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ mdp5_ctl_set_encoder_state(ctl, true);
+
+ mdp5_cmd_enc->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
+ .mode_fixup = mdp5_cmd_encoder_mode_fixup,
+ .mode_set = mdp5_cmd_encoder_mode_set,
+ .disable = mdp5_cmd_encoder_disable,
+ .enable = mdp5_cmd_encoder_enable,
+};
+
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_cmd_enc->intf.num;
+
+ /* Switch the slave encoder's trigger MUX so that the slave
+ * uses the master's start signal.
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ else
+ return -EINVAL;
+
+ /* Smart Panel, Sync mode */
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+
+ /* Make sure the clocks are on when connectors call this function. */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
+
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ mdp5_disable(mdp5_kms);
+
+ return 0;
+}
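
A standalone sketch of the mux selection above, assuming (per the comment)
that the encoder passed in is the master; the bit values are copied from the
mdp5.xml.h hunk earlier in this patch:

#include <stdint.h>
#include <stdio.h>

#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL	0x00000002
#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX	0x00000010
#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX	0x00000100

/* Mirrors the selection above: the *other* interface is routed to the
 * master's software trigger. Returns 0 for an unsupported interface. */
static uint32_t split_dpl_upper(int master_intf_num)
{
	uint32_t data = MDP5_SPLIT_DPL_UPPER_SMART_PANEL;

	if (master_intf_num == 1)
		return data | MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
	if (master_intf_num == 2)
		return data | MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
	return 0;
}

int main(void)
{
	printf("intf1 master -> 0x%03x\n", (unsigned)split_dpl_upper(1));	/* 0x102 */
	printf("intf2 master -> 0x%03x\n", (unsigned)split_dpl_upper(2));	/* 0x012 */
	return 0;
}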
+
+/* initialize command mode encoder */
+struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_cmd_encoder *mdp5_cmd_enc;
+ int ret;
+
+ if (WARN_ON((intf->type != INTF_DSI) ||
+ (intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL);
+ if (!mdp5_cmd_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
+ encoder = &mdp5_cmd_enc->base;
+
+ drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
+ DRM_MODE_ENCODER_DSI);
+
+ drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
+
+ bs_init(mdp5_cmd_enc);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_cmd_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 2f2863cf8b45..c1530772187d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -82,8 +82,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
-#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
-
static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -110,8 +108,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
drm_atomic_crtc_for_each_plane(plane, crtc) {
flush_mask |= mdp5_plane_get_flush(plane);
}
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
- flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
+
+ flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
crtc_flush(crtc, flush_mask);
}
@@ -298,8 +296,6 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
- crtc_flush_all(crtc);
-
mdp5_crtc->enabled = true;
}
@@ -444,13 +440,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- struct drm_gem_object *cursor_bo, *old_bo;
+ struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h;
+ bool cursor_enable = true;
unsigned long flags;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -463,7 +460,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
- return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
+ cursor_enable = false;
+ goto set_cursor;
}
cursor_bo = drm_gem_object_lookup(dev, file, handle);
@@ -504,11 +502,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
- ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
- if (ret)
+set_cursor:
+ ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+ if (ret) {
+ dev_err(dev->dev, "failed to %sable cursor: %d\n",
+ cursor_enable ? "en" : "dis", ret);
goto end;
+ }
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
crtc_flush(crtc, flush_mask);
end:
@@ -613,64 +614,39 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
}
/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
- enum mdp5_intf intf_id)
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- uint32_t flush_mask = 0;
- uint32_t intf_sel;
- unsigned long flags;
+ int lm = mdp5_crtc_get_lm(crtc);
/* now that we know what irq's we want: */
- mdp5_crtc->err.irqmask = intf2err(intf);
- mdp5_crtc->vblank.irqmask = intf2vblank(intf);
- mdp_irq_update(&mdp5_kms->base);
-
- spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
-
- switch (intf) {
- case 0:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
- break;
- case 1:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
- break;
- case 2:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
- break;
- case 3:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
- break;
- default:
- BUG();
- break;
- }
+ mdp5_crtc->err.irqmask = intf2err(intf->num);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
- spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+ /* Register command mode Pingpong done as vblank for now,
+ * so that atomic commit waits for it to finish.
+ * Ideally, in the future, we should take rd_ptr done as vblank,
+ * and let atomic commit wait for pingpong done for command mode.
+ */
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
+ else
+ mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
+ mdp_irq_update(&mdp5_kms->base);
- DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
- flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
-
- crtc_flush(crtc, flush_mask);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+}
- if (WARN_ON(!crtc))
- return -EINVAL;
-
- return mdp5_crtc->lm;
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
}
/* initialize crtc */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 151129032d16..5488b687c8d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,23 +33,31 @@
* requested by the client (in mdp5_crtc_mode_set()).
*/
+struct op_mode {
+ struct mdp5_interface intf;
+
+ bool encoder_enabled;
+ uint32_t start_mask;
+};
+
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
+ int lm;
/* whether this CTL has been allocated or not: */
bool busy;
- /* memory output connection (@see mdp5_ctl_mode): */
- u32 mode;
+ /* Operation Mode Configuration for the Pipeline */
+ struct op_mode pipeline;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
u32 reg_offset;
- /* flush mask used to commit CTL registers */
- u32 flush_mask;
+ /* when do CTL registers need to be flushed? (mask of trigger bits) */
+ u32 pending_ctl_trigger;
bool cursor_on;
@@ -63,6 +71,9 @@ struct mdp5_ctl_manager {
u32 nlm;
u32 nctl;
+ /* to filter out non-present bits in the current hardware config */
+ u32 flush_hw_mask;
+
/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
@@ -94,31 +105,172 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
return mdp5_read(mdp5_kms, reg);
}
+static void set_display_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
+{
+ unsigned long flags;
+ u32 intf_sel;
+
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
+
+ switch (intf->num) {
+ case 0:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+}
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf)
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
unsigned long flags;
- static const enum mdp5_intfnum intfnum[] = {
- INTF0, INTF1, INTF2, INTF3,
- };
+ u32 ctl_op = 0;
+
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
+
+ switch (intf->type) {
+ case INTF_DSI:
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ ctl_op |= MDP5_CTL_OP_CMD_MODE;
+ break;
+
+ case INTF_WB:
+ if (intf->mode == MDP5_INTF_WB_MODE_LINE)
+ ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
+ break;
+
+ default:
+ break;
+ }
spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
- MDP5_CTL_OP_MODE(ctl->mode) |
- MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+ memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
+
+ ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
+ mdp_ctl_flush_mask_encoder(intf);
+
+ /* Virtual interfaces need not set a display intf (e.g.: Writeback) */
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ set_display_intf(mdp5_kms, intf);
+
+ set_ctl_op(ctl, intf);
return 0;
}
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+static bool start_signal_needed(struct mdp5_ctl *ctl)
+{
+ struct op_mode *pipeline = &ctl->pipeline;
+
+ if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+ return false;
+
+ switch (pipeline->intf.type) {
+ case INTF_WB:
+ return true;
+ case INTF_DSI:
+ return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+ default:
+ return false;
+ }
+}
+
+/*
+ * send_start_signal() - Overlay Processor Start Signal
+ *
+ * For a given control operation (display pipeline), a START signal needs to
+ * be sent to kick off operation and activate all layers.
+ * e.g.: DSI command mode, Writeback
+ */
+static void send_start_signal(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+static void refill_start_mask(struct mdp5_ctl *ctl)
+{
+ struct op_mode *pipeline = &ctl->pipeline;
+ struct mdp5_interface *intf = &ctl->pipeline.intf;
+
+ pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+
+ /*
+ * Writeback encoder needs to program & flush
+ * address registers for each page flip..
+ */
+ if (intf->type == INTF_WB)
+ pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+}
+
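To make the kickoff sequence concrete, here is a minimal sketch of the per-frame cycle for a DSI command mode pipeline, assuming the helpers behave as described above (illustrative only; ctl and flush_mask are placeholders):

    /* encoder signals it is ready for data streaming
     * (mdp5_ctl_set_encoder_state() is added just below): */
    mdp5_ctl_set_encoder_state(ctl, true);

    /* every commit clears its bits from start_mask: */
    mdp5_ctl_commit(ctl, flush_mask);

    /* once start_mask reaches 0 and the encoder is enabled,
     * start_signal_needed() returns true and the commit path runs: */
    send_start_signal(ctl);     /* writes 1 to REG_MDP5_CTL_START */
    refill_start_mask(ctl);     /* re-arm for the next frame */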
+/**
+ * mdp5_ctl_set_encoder_state() - set the encoder state
+ *
+ * @enabled: true when the encoder is ready for data streaming; false otherwise.
+ *
+ * Note:
+ * This encoder state is needed to trigger START signal (data path kickoff).
+ */
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+{
+ if (WARN_ON(!ctl))
+ return -EINVAL;
+
+ ctl->pipeline.encoder_enabled = enabled;
+ DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+
+ if (start_signal_needed(ctl)) {
+ send_start_signal(ctl);
+ refill_start_mask(ctl);
+ }
+
+ return 0;
+}
+
+/*
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
- int lm;
+ int lm = ctl->lm;
- lm = mdp5_crtc_get_lm(ctl->crtc);
if (unlikely(WARN_ON(lm < 0))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
ctl->id, lm);
@@ -138,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
ctl->cursor_on = enable;
return 0;
}
-
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
unsigned long flags;
@@ -157,37 +309,122 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+
return 0;
}
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
+{
+ if (intf->type == INTF_WB)
+ return MDP5_CTL_FLUSH_WB;
+
+ switch (intf->num) {
+ case 0: return MDP5_CTL_FLUSH_TIMING_0;
+ case 1: return MDP5_CTL_FLUSH_TIMING_1;
+ case 2: return MDP5_CTL_FLUSH_TIMING_2;
+ case 3: return MDP5_CTL_FLUSH_TIMING_3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+ switch (cursor_id) {
+ case 0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case 1: return MDP5_CTL_FLUSH_CURSOR_1;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_lm(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ case 5: return MDP5_CTL_FLUSH_LM5;
+ default: return 0;
+ }
+}
+
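As a usage illustration, these masks compose with bitwise OR; a hypothetical update touching one pipe, one mixer and one encoder might build its mask as follows (the variables ctl and intf are illustrative):

    u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_RGB0) |
                     mdp_ctl_flush_mask_lm(0) |
                     mdp_ctl_flush_mask_encoder(&intf);

    mdp5_ctl_commit(ctl, flush_mask);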
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ u32 sw_mask = 0;
+#define BIT_NEEDS_SW_FIX(bit) \
+ (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
+
+ /* for some targets, cursor bit is the same as LM bit */
+ if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
+ sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+
+ return sw_mask;
+}
+
+/**
+ * mdp5_ctl_commit() - Register Flush
+ *
+ * The flush register is used to indicate several registers are all
+ * programmed, and are safe to update to the back copy of the double
+ * buffered registers.
+ *
+ * Some registers' FLUSH bits are shared when the hardware does not have
+ * dedicated bits for them; handling these is the job of fix_sw_flush().
+ *
+ * CTL registers need to be flushed in some circumstances; if that is the
+ * case, some trigger bits will be present in both flush mask and
+ * ctl->pending_ctl_trigger.
+ */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ struct op_mode *pipeline = &ctl->pipeline;
unsigned long flags;
- if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
- int lm = mdp5_crtc_get_lm(ctl->crtc);
+ pipeline->start_mask &= ~flush_mask;
- if (unlikely(WARN_ON(lm < 0))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
- ctl->id, lm);
- return -EINVAL;
- }
+ VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
+ pipeline->start_mask, ctl->pending_ctl_trigger);
- /* for current targets, cursor bit is the same as LM bit */
- flush_mask |= mdp_ctl_flush_mask_lm(lm);
+ if (ctl->pending_ctl_trigger & flush_mask) {
+ flush_mask |= MDP5_CTL_FLUSH_CTL;
+ ctl->pending_ctl_trigger = 0;
}
- spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
- spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ flush_mask |= fix_sw_flush(ctl, flush_mask);
- return 0;
-}
+ flush_mask &= ctl_mgr->flush_hw_mask;
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
-{
- return ctl->flush_mask;
+ if (flush_mask) {
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+
+ if (start_signal_needed(ctl)) {
+ send_start_signal(ctl);
+ refill_start_mask(ctl);
+ }
+
+ return 0;
}
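A short sketch of the trigger promotion described in the comment above; the call sequence is illustrative, the function names are from this patch:

    mdp5_ctl_blend(ctl, lm, blend_cfg);  /* records a pending CTL trigger */
    mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_lm(lm));
    /* commit sees the trigger bit within flush_mask, so it also ORs in
     * MDP5_CTL_FLUSH_CTL and clears pending_ctl_trigger */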
void mdp5_ctl_release(struct mdp5_ctl *ctl)
@@ -208,6 +445,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl)
DBG("CTL %d released", ctl->id);
}
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
+{
+ return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
+
/*
* mdp5_ctl_request() - CTL dynamic allocation
*
@@ -235,8 +477,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
ctl = &ctl_mgr->ctls[c];
+ ctl->lm = mdp5_crtc_get_lm(crtc);
ctl->crtc = crtc;
ctl->busy = true;
+ ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
unlock:
@@ -267,7 +511,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
struct mdp5_ctl_manager *ctl_mgr;
- const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+ const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -289,6 +533,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
ctl_mgr->dev = dev;
ctl_mgr->nlm = hw_cfg->lm.count;
ctl_mgr->nctl = ctl_cfg->count;
+ ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
spin_lock_init(&ctl_mgr->pool_lock);
/* initialize each CTL of the pool: */
@@ -303,9 +548,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
- ctl->mode = MODE_NONE;
ctl->reg_offset = ctl_cfg->base[c];
- ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
ctl->busy = false;
spin_lock_init(&ctl->hw_lock);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index ad48788efeea..7a62000994a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -33,19 +33,13 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf);
+struct mdp5_interface;
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
-
-/* @blend_cfg: see LM blender config definition below */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
-
-/* @flush_mask: see CTL flush masks definitions below */
-int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
-
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
/*
* blend_cfg (LM blender config):
@@ -72,51 +66,32 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
}
/*
- * flush_mask (CTL flush masks):
+ * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
+ *
+ * @blend_cfg: see LM blender config definition below
*
- * The following functions allow each DRM entity to get and store
- * their own flush mask.
- * Once stored, these masks will then be accessed through each DRM's
- * interface and used by the caller of mdp5_ctl_commit() to specify
- * which block(s) need to be flushed through @flush_mask parameter.
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
-#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
+/**
+ * mdp_ctl_flush_mask...() - Register FLUSH masks
+ *
+ * These masks are used to specify which block(s) need to be flushed
+ * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask).
+ */
+u32 mdp_ctl_flush_mask_lm(int lm);
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
+u32 mdp_ctl_flush_mask_cursor(int cursor_id);
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
-static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
-{
- /* TODO: use id once multiple cursor support is present */
- (void)cursor_id;
+/* @flush_mask: see CTL flush masks definitions below */
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
- return MDP5_CTL_FLUSH_CURSOR_DUMMY;
-}
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
-static inline u32 mdp_ctl_flush_mask_lm(int lm)
-{
- switch (lm) {
- case 0: return MDP5_CTL_FLUSH_LM0;
- case 1: return MDP5_CTL_FLUSH_LM1;
- case 2: return MDP5_CTL_FLUSH_LM2;
- case 5: return MDP5_CTL_FLUSH_LM5;
- default: return 0;
- }
-}
-static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
-{
- switch (pipe) {
- case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
- case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
- case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
- case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
- case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
- case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
- case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
- case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
- case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
- case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
- default: return 0;
- }
-}
#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index af0e02fa4f48..1188f4bf1e60 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -23,8 +23,7 @@
struct mdp5_encoder {
struct drm_encoder base;
- int intf;
- enum mdp5_intf intf_id;
+ struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
@@ -126,7 +125,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- int intf = mdp5_encoder->intf;
+ int intf = mdp5_encoder->intf.num;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
@@ -188,7 +187,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
- if (mdp5_encoder->intf_id == INTF_eDP) {
+ if (mdp5_encoder->intf.type == INTF_eDP) {
display_v_start += mode->htotal - mode->hsync_start;
display_v_end -= mode->hsync_start - mode->hdisplay;
}
@@ -218,21 +217,29 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
}
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ int lm = mdp5_crtc_get_lm(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf.num;
unsigned long flags;
if (WARN_ON(!mdp5_encoder->enabled))
return;
+ mdp5_ctl_set_encoder_state(ctl, false);
+
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -242,7 +249,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
bs_set(mdp5_encoder, 0);
@@ -253,19 +260,21 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf.num;
unsigned long flags;
if (WARN_ON(mdp5_encoder->enabled))
return;
- mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
- mdp5_encoder->intf_id);
-
bs_set(mdp5_encoder, 1);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ mdp5_ctl_set_encoder_state(ctl, true);
mdp5_encoder->enabled = true;
}
@@ -277,12 +286,51 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.enable = mdp5_encoder_enable,
};
+int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_encoder->intf.num;
+
+ /* Switch the slave encoder's TimingGen Sync mode so that it uses
+ * the master's enable signal.
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ else
+ return -EINVAL;
+
+ /* Make sure clocks are on when connectors call this function. */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+ MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+ /* Dumb Panel, Sync mode */
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ mdp5_disable(mdp5_kms);
+
+ return 0;
+}
+
/* initialize encoder */
-struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
- enum mdp5_intf intf_id)
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
+ int enc_type = (intf->type == INTF_DSI) ?
+ DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
int ret;
mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
@@ -291,14 +339,13 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
goto fail;
}
- mdp5_encoder->intf = intf;
- mdp5_encoder->intf_id = intf_id;
+ memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
spin_lock_init(&mdp5_encoder->intf_lock);
- drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type);
+
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
bs_init(mdp5_encoder);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index a9407105b9b7..33bd4c6160dd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -23,7 +23,7 @@
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -35,8 +35,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -61,7 +61,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -73,8 +73,8 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
unsigned int id;
uint32_t status;
- status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+ status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
VERB("status=%08x", status);
@@ -91,13 +91,13 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
uint32_t intr;
- intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+ intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
- if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
+ if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms);
- intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
+ intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
}
while (intr) {
@@ -128,10 +128,10 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
* can register to get their irq's delivered
*/
-#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
- MDP5_HW_INTR_STATUS_INTR_DSI1 | \
- MDP5_HW_INTR_STATUS_INTR_HDMI | \
- MDP5_HW_INTR_STATUS_INTR_EDP)
+#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
+ MDSS_HW_INTR_STATUS_INTR_DSI1 | \
+ MDSS_HW_INTR_STATUS_INTR_HDMI | \
+ MDSS_HW_INTR_STATUS_INTR_EDP)
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 92b61db5754c..dfa8beb9343a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -58,7 +58,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
@@ -86,6 +86,18 @@ static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
return rate;
}
+static int mdp5_set_split_display(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode)
+{
+ if (is_cmd_mode)
+ return mdp5_cmd_encoder_set_split_display(encoder,
+ slave_encoder);
+ else
+ return mdp5_encoder_set_split_display(encoder, slave_encoder);
+}
+
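For context, a hedged sketch of how a caller might reach this dispatcher through the set_split_display hook added to msm_kms_funcs later in this patch (the calling code shown here is hypothetical):

    struct msm_kms *kms = priv->kms;

    if (kms->funcs->set_split_display)
        ret = kms->funcs->set_split_display(kms, master_encoder,
                                            slave_encoder, is_cmd_mode);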
static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -131,6 +143,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.complete_commit = mdp5_complete_commit,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
+ .set_split_display = mdp5_set_split_display,
.preclose = mdp5_preclose,
.destroy = mdp5_destroy,
},
@@ -161,6 +174,134 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
return 0;
}
+static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
+ enum mdp5_intf_type intf_type, int intf_num,
+ enum mdp5_intf_mode intf_mode)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct mdp5_interface intf = {
+ .num = intf_num,
+ .type = intf_type,
+ .mode = intf_mode,
+ };
+
+ if ((intf_type == INTF_DSI) &&
+ (intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
+ encoder = mdp5_cmd_encoder_init(dev, &intf);
+ else
+ encoder = mdp5_encoder_init(dev, &intf);
+
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct encoder\n");
+ return encoder;
+ }
+
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ return encoder;
+}
+
+static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
+{
+ const int intf_cnt = hw_cfg->intf.count;
+ const u32 *intfs = hw_cfg->intfs;
+ int id = 0, i;
+
+ for (i = 0; i < intf_cnt; i++) {
+ if (intfs[i] == INTF_DSI) {
+ if (intf_num == i)
+ return id;
+
+ id++;
+ }
+ }
+
+ return -EINVAL;
+}
+
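A worked example of the mapping, under a hypothetical interface list:

    /* hw_cfg->intfs = { INTF_eDP, INTF_DSI, INTF_HDMI, INTF_DSI }:
     *   get_dsi_id_from_intf(hw_cfg, 1) == 0       (first DSI)
     *   get_dsi_id_from_intf(hw_cfg, 3) == 1       (second DSI)
     *   get_dsi_id_from_intf(hw_cfg, 0) == -EINVAL (intf 0 is not DSI)
     */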
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ const struct mdp5_cfg_hw *hw_cfg =
+ mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num];
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ switch (intf_type) {
+ case INTF_DISABLED:
+ break;
+ case INTF_eDP:
+ if (!priv->edp)
+ break;
+
+ encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
+ MDP5_INTF_MODE_NONE);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_edp_modeset_init(priv->edp, dev, encoder);
+ break;
+ case INTF_HDMI:
+ if (!priv->hdmi)
+ break;
+
+ encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
+ MDP5_INTF_MODE_NONE);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ break;
+ case INTF_DSI:
+ {
+ int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
+ struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
+ enum mdp5_intf_mode mode;
+ int i;
+
+ if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ dev_err(dev->dev, "failed to find dsi from intf %d\n",
+ intf_num);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!priv->dsi[dsi_id])
+ break;
+
+ for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+ mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
+ MDP5_INTF_DSI_MODE_COMMAND :
+ MDP5_INTF_DSI_MODE_VIDEO;
+ dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
+ intf_num, mode);
+ if (IS_ERR(dsi_encs[i])) {
+ ret = PTR_ERR(dsi_encs[i]);
+ break;
+ }
+ }
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+ break;
+ }
+ default:
+ dev_err(dev->dev, "unknown intf: %d\n", intf_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe crtcs[] = {
@@ -171,7 +312,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- struct drm_encoder *encoder;
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
@@ -222,44 +362,13 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
}
}
- if (priv->hdmi) {
- /* Construct encoder for HDMI: */
- encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
- priv->encoders[priv->num_encoders++] = encoder;
-
- ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
- goto fail;
- }
- }
-
- if (priv->edp) {
- /* Construct encoder for eDP: */
- encoder = mdp5_encoder_init(dev, 0, INTF_eDP);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct eDP encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
- priv->encoders[priv->num_encoders++] = encoder;
-
- /* Construct bridge/connector for eDP: */
- ret = msm_edp_modeset_init(priv->edp, dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize eDP: %d\n",
- ret);
+ /* Construct encoders and perform modeset initialization of connector
+ * devices for each external display interface.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) {
+ ret = modeset_init_intf(mdp5_kms, i);
+ if (ret)
goto fail;
- }
}
return 0;
@@ -274,11 +383,11 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms,
uint32_t version;
mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
mdp5_disable(mdp5_kms);
- *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
- *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+ *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
@@ -321,6 +430,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->dev = dev;
+ /* mdp5_kms->mmio actually represents the MDSS base address */
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
@@ -403,8 +513,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- for (i = 0; i < config->hw->intf.count; i++)
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (!config->hw->intf.base[i] ||
+ mdp5_cfg_intf_is_virtual(config->hw->intfs[i]))
+ continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+ }
mdp5_disable(mdp5_kms);
mdelay(16);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 49d011e8835b..2c0de174cc09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -54,7 +54,7 @@ struct mdp5_kms {
/*
* lock to protect access to global resources: ie., following register:
- * - REG_MDP5_DISP_INTF_SEL
+ * - REG_MDP5_MDP_DISP_INTF_SEL
*/
spinlock_t resource_lock;
@@ -94,6 +94,24 @@ struct mdp5_plane_state {
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
+enum mdp5_intf_mode {
+ MDP5_INTF_MODE_NONE = 0,
+
+ /* Modes used for DSI interface (INTF_DSI type): */
+ MDP5_INTF_DSI_MODE_VIDEO,
+ MDP5_INTF_DSI_MODE_COMMAND,
+
+ /* Modes used for WB interface (INTF_WB type): */
+ MDP5_INTF_WB_MODE_BLOCK,
+ MDP5_INTF_WB_MODE_LINE,
+};
+
+struct mdp5_interface {
+ int num; /* display interface number */
+ enum mdp5_intf_type type;
+ enum mdp5_intf_mode mode;
+};
+
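For illustration, the descriptor matching the old hardcoded HDMI pairing (interface number 3, as in the code removed from modeset_init() in mdp5_kms.c above) would be:

    struct mdp5_interface intf = {
        .num  = 3,
        .type = INTF_HDMI,
        .mode = MDP5_INTF_MODE_NONE,
    };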
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
msm_writel(data, mdp5_kms->mmio + reg);
@@ -130,9 +148,9 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
}
}
-static inline uint32_t intf2err(int intf)
+static inline uint32_t intf2err(int intf_num)
{
- switch (intf) {
+ switch (intf_num) {
case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
@@ -141,9 +159,23 @@ static inline uint32_t intf2err(int intf)
}
}
-static inline uint32_t intf2vblank(int intf)
+#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer)
+static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
{
- switch (intf) {
+ /*
+ * In case of DSI Command Mode, the Ping Pong's read pointer IRQ
+ * acts as a Vblank signal. The Ping Pong buffer used is bound to the
+ * layer mixer.
+ */
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
+ return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
+
+ if (intf->type == INTF_WB)
+ return MDP5_IRQ_WB_2_DONE;
+
+ switch (intf->num) {
case 0: return MDP5_IRQ_INTF0_VSYNC;
case 1: return MDP5_IRQ_INTF1_VSYNC;
case 2: return MDP5_IRQ_INTF2_VSYNC;
@@ -152,6 +184,11 @@ static inline uint32_t intf2vblank(int intf)
}
}
+static inline uint32_t lm2ppdone(int lm)
+{
+ return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
+}
+
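A small worked example, assuming the PING_PONG IRQ bits are laid out consecutively per ping-pong block (which is what these shifts rely on); dsi_cmd_intf is a hypothetical DSI command mode interface:

    /* layer mixer 5 is bound to ping-pong 3: */
    GET_PING_PONG_ID(5);           /* == 3 */
    lm2ppdone(5);                  /* == MDP5_IRQ_PING_PONG_0_DONE << 3 */
    intf2vblank(5, &dsi_cmd_intf); /* == MDP5_IRQ_PING_PONG_0_RD_PTR << 3 */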
int mdp5_disable(struct mdp5_kms *mdp5_kms);
int mdp5_enable(struct mdp5_kms *mdp5_kms);
@@ -197,13 +234,33 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
- enum mdp5_intf intf_id);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
-struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
- enum mdp5_intf intf_id);
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf);
+int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf);
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+#else
+static inline struct drm_encoder *mdp5_cmd_encoder_init(
+ struct drm_device *dev, struct mdp5_interface *intf)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int mdp5_cmd_encoder_set_split_display(
+ struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
+{
+ return -EINVAL;
+}
+#endif
#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 05cf9ab2a876..18a3d203b174 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -156,7 +156,8 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -166,7 +167,8 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -505,8 +507,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 1f795af89680..16702aecf0df 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -43,7 +43,7 @@
* set.
*
* 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
*
* 3) mdp5_smp_commit():
@@ -74,7 +74,7 @@ struct mdp5_smp {
spinlock_t state_lock;
mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
- struct mdp5_client_smp_state client_state[CID_MAX];
+ struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
static inline
@@ -85,27 +85,31 @@ struct mdp5_kms *get_kms(struct mdp5_smp *smp)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
- WARN_ON(plane >= pipe2nclients(pipe));
- switch (pipe) {
- case SSPP_VIG0: return CID_VIG0_Y + plane;
- case SSPP_VIG1: return CID_VIG1_Y + plane;
- case SSPP_VIG2: return CID_VIG2_Y + plane;
- case SSPP_RGB0: return CID_RGB0;
- case SSPP_RGB1: return CID_RGB1;
- case SSPP_RGB2: return CID_RGB2;
- case SSPP_DMA0: return CID_DMA0_Y + plane;
- case SSPP_DMA1: return CID_DMA1_Y + plane;
- case SSPP_VIG3: return CID_VIG3_Y + plane;
- case SSPP_RGB3: return CID_RGB3;
- default: return CID_UNUSED;
- }
+#define CID_UNUSED 0
+
+ if (WARN_ON(plane >= pipe2nclients(pipe)))
+ return CID_UNUSED;
+
+ /*
+ * Note on SMP clients:
+ * For ViG pipes, the fetch clients for the Y/Cr/Cb components are
+ * always consecutive, and in that order.
+ *
+ * e.g.:
+ * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
+ * Y plane's client ID is N
+ * Cr plane's client ID is N + 1
+ * Cb plane's client ID is N + 2
+ */
+
+ return mdp5_cfg->smp.clients[pipe] + plane;
}
/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
- enum mdp5_client_id cid, int nblks)
+ u32 cid, int nblks)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
const struct mdp5_cfg_hw *hw_cfg;
@@ -227,7 +231,7 @@ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
}
static void update_smp_state(struct mdp5_smp *smp,
- enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+ u32 cid, mdp5_smp_state_t *assigned)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
@@ -237,25 +241,25 @@ static void update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+ val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
switch (fld) {
case 0:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
}
}
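The ALLOC registers pack three client IDs per 32-bit word; a worked example of the indexing above:

    /* SMP block 7: idx = 7 / 3 = 2, fld = 7 % 3 = 1, so its client ID
     * lands in the CLIENT1 field of MDP5_MDP_SMP_ALLOC_W_REG(0, 2),
     * mirrored into the matching MDP5_MDP_SMP_ALLOC_R_REG. */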
@@ -267,7 +271,7 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
- enum mdp5_client_id cid = pipe2client(pipe, i);
+ u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
bitmap_or(assigned, ps->inuse, ps->pending, cnt);
@@ -283,7 +287,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
- enum mdp5_client_id cid = pipe2client(pipe, i);
+ u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
/*
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 18fd643b6e69..5b192128cda2 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -96,11 +96,11 @@ static void complete_commit(struct msm_commit *c)
kms->funcs->prepare_commit(kms, state);
- drm_atomic_helper_commit_pre_planes(dev, state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state);
- drm_atomic_helper_commit_post_planes(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
/* NOTE: _wait_for_vblanks() only waits for vblank on
* enabled CRTCs. So we end up faulting when disabling
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a4269119f9ea..47f4dd407671 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -182,41 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev)
return 4;
}
-static int msm_load(struct drm_device *dev, unsigned long flags)
-{
- struct platform_device *pdev = dev->platformdev;
- struct msm_drm_private *priv;
- struct msm_kms *kms;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
- return -ENOMEM;
- }
+#include <linux/of_address.h>
- dev->dev_private = priv;
-
- priv->wq = alloc_ordered_workqueue("msm", 0);
- init_waitqueue_head(&priv->fence_event);
- init_waitqueue_head(&priv->pending_crtcs_event);
-
- INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->fence_cbs);
+static int msm_init_vram(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned long size = 0;
+ int ret = 0;
- drm_mode_config_init(dev);
+#ifdef CONFIG_OF
+ /* In the device-tree world, we could have a 'memory-region'
+ * phandle, which gives us a link to our "vram". Allocating
+ * is all nicely abstracted behind the dma api, but we need
+ * to know the entire size to allocate it all in one go. There
+ * are two cases:
+ * 1) device with no IOMMU, in which case we need exclusive
+ * access to a VRAM carveout big enough for all gpu
+ * buffers
+ * 2) device with IOMMU, but where the bootloader puts up
+ * a splash screen. In this case, the VRAM carveout
+ * need only be large enough for fbdev fb. But we need
+ * exclusive access to the buffer to avoid the kernel
+ * using those pages for other purposes (which appears
+ * as corruption on screen before we have a chance to
+ * load and do initial modeset)
+ */
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
+ if (node) {
+ struct resource r;
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+ size = resource_size(&r);
+ DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+ } else
+#endif
/* if we have no IOMMU, then we need to use carveout allocator.
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
if (!iommu_present(&platform_bus_type)) {
+ DRM_INFO("using %s VRAM carveout\n", vram);
+ size = memparse(vram, NULL);
+ }
+
+ if (size) {
DEFINE_DMA_ATTRS(attrs);
- unsigned long size;
void *p;
- DBG("using %s VRAM carveout", vram);
- size = memparse(vram, NULL);
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
@@ -232,8 +248,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (!p) {
dev_err(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
- ret = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
dev_info(dev->dev, "VRAM: %08x->%08x\n",
@@ -241,6 +256,37 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
(uint32_t)(priv->vram.paddr + size));
}
+ return ret;
+}
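For reference, memparse() accepts the usual size suffixes, so the fallback path resolves the existing vram module parameter like this (the value is illustrative):

    size = memparse("16m", NULL);  /* == 16 * 1024 * 1024 */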
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
+
+ INIT_LIST_HEAD(&priv->inactive_list);
+ INIT_LIST_HEAD(&priv->fence_cbs);
+
+ drm_mode_config_init(dev);
+
+ ret = msm_init_vram(dev);
+ if (ret)
+ goto fail;
+
platform_set_drvdata(pdev, dev);
/* Bind all our sub-components: */
@@ -1030,6 +1076,7 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_dsi_register();
msm_edp_register();
hdmi_register();
adreno_register();
@@ -1043,6 +1090,7 @@ static void __exit msm_drm_unregister(void)
hdmi_unregister();
adreno_unregister();
msm_edp_unregister();
+ msm_dsi_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9e8d441b61c3..04db4bd1b5b6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -82,6 +82,9 @@ struct msm_drm_private {
*/
struct msm_edp *edp;
+ /* DSI is shared by mdp4 and mdp5 */
+ struct msm_dsi *dsi[2];
+
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
@@ -236,6 +239,32 @@ void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
+struct msm_dsi;
+enum msm_dsi_encoder_id {
+ MSM_DSI_VIDEO_ENCODER_ID = 0,
+ MSM_DSI_CMD_ENCODER_ID = 1,
+ MSM_DSI_ENCODER_NUM = 2
+};
+#ifdef CONFIG_DRM_MSM_DSI
+void __init msm_dsi_register(void);
+void __exit msm_dsi_unregister(void);
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
+#else
+static inline void __init msm_dsi_register(void)
+{
+}
+static inline void __exit msm_dsi_unregister(void)
+{
+}
+static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
+ struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+ return -EINVAL;
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index df60f65728ff..95f6532df02d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
mutex_lock(&dev->struct_mutex);
- fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
+ MSM_BO_WC | MSM_BO_STOLEN);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(fbdev->bo)) {
ret = PTR_ERR(fbdev->bo);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 49dea4fb55ac..479d8af72bcb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj)
priv->vram.paddr;
}
+static bool use_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ return !msm_obj->vram_node;
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
int npages)
@@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
- if (iommu_present(&platform_bus_type))
+ if (use_pages(obj))
p = drm_gem_get_pages(obj);
else
p = get_pages_vram(obj, npages);
@@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj)
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
- if (iommu_present(&platform_bus_type))
+ if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else {
drm_mm_remove_node(msm_obj->vram_node);
@@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
unsigned sz;
+ bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
return -EINVAL;
}
- sz = sizeof(*msm_obj);
if (!iommu_present(&platform_bus_type))
+ use_vram = true;
+ else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+ use_vram = true;
+
+ if (WARN_ON(use_vram && !priv->vram.size))
+ return -EINVAL;
+
+ sz = sizeof(*msm_obj);
+ if (use_vram)
sz += sizeof(struct drm_mm_node);
msm_obj = kzalloc(sz, GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
- if (!iommu_present(&platform_bus_type))
+ if (use_vram)
msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags;
@@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
if (ret)
goto fail;
- if (iommu_present(&platform_bus_type)) {
+ if (use_pages(obj)) {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8fbbd0594c46..85d481e29276 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -21,6 +21,9 @@
#include <linux/reservation.h>
#include "msm_drv.h"
+/* Additional internal-use only BO flags: */
+#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -59,7 +62,7 @@ struct msm_gem_object {
struct reservation_object _resv;
/* For physically contiguous buffers. Used when we don't have
- * an IOMMU.
+ * an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
};
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 3a78cb48662b..a9f17bdb4530 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -47,6 +47,10 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
+ int (*set_split_display)(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 542bb266a0ab..3d96b49fe662 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -703,7 +703,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
@@ -724,7 +724,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
static void nv_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index d7b495a5f30c..af7249ca0f4b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -358,7 +358,7 @@ static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
static void nv04_dac_prepare(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
@@ -409,7 +409,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index f6ca343fd34a..7cfb0cbc9b6e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -244,7 +244,7 @@ static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
static void nv04_dfp_prepare(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
@@ -445,7 +445,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct dcb_output *dcbe = nv_encoder->dcb;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index f96237ef2a6b..4131be5507ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -109,7 +109,7 @@ nv04_display_create(struct drm_device *dev)
crtc->funcs->save(crtc);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *func = encoder->helper_private;
func->save(encoder);
}
@@ -138,7 +138,7 @@ nv04_display_destroy(struct drm_device *dev)
/* Restore state */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *func = encoder->helper_private;
func->restore(encoder);
}
@@ -169,7 +169,7 @@ nv04_display_init(struct drm_device *dev)
* on suspend too.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *func = encoder->helper_private;
func->restore(encoder);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index d9664b37def1..70e95cf6fd19 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -122,7 +122,7 @@ static void nv04_tv_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -164,7 +164,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 731d74efc1e5..d9720dda8385 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -405,7 +405,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
int head = nouveau_crtc(encoder->crtc)->index;
uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[
@@ -583,7 +583,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
nv17_tv_update_rescaler(encoder);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 5ad17fc36ae3..0b5af0fe8659 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -12,6 +12,13 @@
#define NV_DMA_TO_MEMORY 0x00000003
#define NV_DMA_IN_MEMORY 0x0000003d
+#define FERMI_TWOD_A 0x0000902d
+
+#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x0000903d
+
+#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
+#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+
#define NV04_DISP 0x00000046
#define NV03_CHANNEL_DMA 0x0000006b
@@ -25,6 +32,7 @@
#define G82_CHANNEL_GPFIFO 0x0000826f
#define FERMI_CHANNEL_GPFIFO 0x0000906f
#define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f
+#define MAXWELL_CHANNEL_GPFIFO_A 0x0000b06f
#define NV50_DISP 0x00005070
#define G82_DISP 0x00008270
@@ -84,6 +92,7 @@
#define KEPLER_C 0x0000a297
#define MAXWELL_A 0x0000b097
+#define MAXWELL_B 0x0000b197
#define FERMI_COMPUTE_A 0x000090c0
#define FERMI_COMPUTE_B 0x000091c0
@@ -92,6 +101,7 @@
#define KEPLER_COMPUTE_B 0x0000a1c0
#define MAXWELL_COMPUTE_A 0x0000b0c0
+#define MAXWELL_COMPUTE_B 0x0000b1c0
/*******************************************************************************
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 7e29c52617ea..e832f729e1b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -10,4 +10,7 @@ extern struct nvkm_oclass gf100_ce1_oclass;
extern struct nvkm_oclass gk104_ce0_oclass;
extern struct nvkm_oclass gk104_ce1_oclass;
extern struct nvkm_oclass gk104_ce2_oclass;
+extern struct nvkm_oclass gm204_ce0_oclass;
+extern struct nvkm_oclass gm204_ce1_oclass;
+extern struct nvkm_oclass gm204_ce2_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 05321ce7ab15..97cdeab8e44c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -116,6 +116,7 @@ extern struct nvkm_oclass *gf100_fifo_oclass;
extern struct nvkm_oclass *gk104_fifo_oclass;
extern struct nvkm_oclass *gk20a_fifo_oclass;
extern struct nvkm_oclass *gk208_fifo_oclass;
+extern struct nvkm_oclass *gm204_fifo_oclass;
int nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
struct nvkm_notify *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 93ef1f2bfac4..7cbe20280760 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -38,7 +38,7 @@ nvkm_gr(void *obj)
}
#define nvkm_gr_create(p,e,c,y,d) \
- nvkm_engine_create((p), (e), (c), (y), "PGR", "graphics", (d))
+ nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
#define nvkm_gr_destroy(d) \
nvkm_engine_destroy(&(d)->base)
#define nvkm_gr_init(d) \
@@ -72,6 +72,8 @@ extern struct nvkm_oclass *gk110_gr_oclass;
extern struct nvkm_oclass *gk110b_gr_oclass;
extern struct nvkm_oclass *gk208_gr_oclass;
extern struct nvkm_oclass *gm107_gr_oclass;
+extern struct nvkm_oclass *gm204_gr_oclass;
+extern struct nvkm_oclass *gm206_gr_oclass;
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index d104c1aac807..1bcb763cfca0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -45,4 +45,5 @@ nvkm_instmem(void *obj)
extern struct nvkm_oclass *nv04_instmem_oclass;
extern struct nvkm_oclass *nv40_instmem_oclass;
extern struct nvkm_oclass *nv50_instmem_oclass;
+extern struct nvkm_oclass *gk20a_instmem_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 7b86acc634a0..755942352557 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -35,6 +35,7 @@ extern struct nvkm_oclass *gt215_pmu_oclass;
extern struct nvkm_oclass *gf100_pmu_oclass;
extern struct nvkm_oclass *gf110_pmu_oclass;
extern struct nvkm_oclass *gk104_pmu_oclass;
+extern struct nvkm_oclass *gk110_pmu_oclass;
extern struct nvkm_oclass *gk208_pmu_oclass;
extern struct nvkm_oclass *gk20a_pmu_oclass;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 77326e344dad..6edcce1658b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1110,6 +1110,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e581f63cbf25..0589babc506e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -184,7 +184,8 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 handle, u32 engine, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+ KEPLER_CHANNEL_GPFIFO_A,
FERMI_CHANNEL_GPFIFO,
G82_CHANNEL_GPFIFO,
NV50_CHANNEL_GPFIFO,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index db7095ae4ebb..3162040bc314 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -309,7 +309,7 @@ detect_analog:
nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- struct drm_encoder_helper_funcs *helper =
+ const struct drm_encoder_helper_funcs *helper =
encoder->helper_private;
if (helper->detect(encoder, connector) ==
@@ -592,7 +592,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
static struct drm_display_mode *
nouveau_connector_native_mode(struct drm_connector *connector)
{
- struct drm_connector_helper_funcs *helper = connector->helper_private;
+ const struct drm_connector_helper_funcs *helper = connector->helper_private;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 860b0e2d4181..8670d90cdc11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -869,13 +869,20 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct nouveau_bo *bo;
+ uint32_t domain;
int ret;
args->pitch = roundup(args->width * (args->bpp / 8), 256);
args->size = args->pitch * args->height;
args->size = roundup(args->size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
+	/* Use VRAM if there is any; otherwise fall back to system memory */
+ if (nouveau_drm(dev)->device.info.ram_size != 0)
+ domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ domain = NOUVEAU_GEM_DOMAIN_GART;
+
+ ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8763deb5188b..89049335b738 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -181,6 +181,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
break;
case FERMI_CHANNEL_GPFIFO:
case KEPLER_CHANNEL_GPFIFO_A:
+ case MAXWELL_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index fc68f0973f9e..dd726523ca99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,7 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_PATCHLEVEL 2
/*
* 1.1.1:
@@ -28,6 +28,8 @@
* - fermi,kepler,maxwell zbc
* 1.2.1:
* - allow concurrent access to bo's mapped read/write.
+ * 1.2.2:
+ * - add NOUVEAU_GEM_DOMAIN_COHERENT flag
*/
#include <nvif/client.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7c077fced1d1..0e690bf19fc9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -189,6 +189,9 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
+ if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
+ flags |= TTM_PL_FLAG_UNCACHED;
+
ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
tile_flags, NULL, NULL, pnvbo);
if (ret)
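The new flag mapping is a one-way translation: requesting
NOUVEAU_GEM_DOMAIN_COHERENT forces an uncached TTM placement so the CPU and
GPU views of the buffer stay coherent without explicit flushes. A minimal
sketch of that translation, using made-up flag values rather than the real
nouveau/TTM constants:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative flag values; the real ones live in the nouveau/TTM headers. */
	#define DOMAIN_CPU      0x1
	#define DOMAIN_COHERENT 0x2
	#define PL_SYSTEM       0x10
	#define PL_UNCACHED     0x20

	static uint32_t domain_to_flags(uint32_t domain, uint32_t flags)
	{
		if (!flags || (domain & DOMAIN_CPU))
			flags |= PL_SYSTEM;
		if (domain & DOMAIN_COHERENT)
			flags |= PL_UNCACHED;	/* keep CPU/GPU views coherent */
		return flags;
	}

	int main(void)
	{
		printf("0x%x\n", domain_to_flags(DOMAIN_COHERENT, 0));	/* 0x30 */
		return 0;
	}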
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index dc5900bf54ff..775277f1edb0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
+#include <linux/iommu.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
@@ -91,6 +92,72 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
return 0;
}
+static void nouveau_platform_probe_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+ int err;
+ unsigned long pgsize_bitmap;
+
+ mutex_init(&gpu->iommu.mutex);
+
+ if (iommu_present(&platform_bus_type)) {
+ gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+		/* iommu_domain_alloc() returns NULL on failure, not an ERR_PTR */
+		if (!gpu->iommu.domain)
+ goto error;
+
+ /*
+	 * An IOMMU is only usable if it supports page sizes smaller
+	 * than or equal to the system's PAGE_SIZE, with a preference
+	 * for the case where both are equal.
+ */
+ pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
+ if (pgsize_bitmap & PAGE_SIZE) {
+ gpu->iommu.pgshift = PAGE_SHIFT;
+ } else {
+ gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+ if (gpu->iommu.pgshift == 0) {
+ dev_warn(dev, "unsupported IOMMU page size\n");
+ goto free_domain;
+ }
+ gpu->iommu.pgshift -= 1;
+ }
+
+ err = iommu_attach_device(gpu->iommu.domain, dev);
+ if (err)
+ goto free_domain;
+
+ err = nvkm_mm_init(&gpu->iommu._mm, 0,
+ (1ULL << 40) >> gpu->iommu.pgshift, 1);
+ if (err)
+ goto detach_device;
+
+ gpu->iommu.mm = &gpu->iommu._mm;
+ }
+
+ return;
+
+detach_device:
+ iommu_detach_device(gpu->iommu.domain, dev);
+
+free_domain:
+ iommu_domain_free(gpu->iommu.domain);
+
+error:
+ gpu->iommu.domain = NULL;
+ gpu->iommu.pgshift = 0;
+ dev_err(dev, "cannot initialize IOMMU MM\n");
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+ if (gpu->iommu.domain) {
+ nvkm_mm_fini(&gpu->iommu._mm);
+ iommu_detach_device(gpu->iommu.domain, dev);
+ iommu_domain_free(gpu->iommu.domain);
+ }
+}
+
static int nouveau_platform_probe(struct platform_device *pdev)
{
struct nouveau_platform_gpu *gpu;
@@ -118,6 +185,8 @@ static int nouveau_platform_probe(struct platform_device *pdev)
if (IS_ERR(gpu->clk_pwr))
return PTR_ERR(gpu->clk_pwr);
+ nouveau_platform_probe_iommu(&pdev->dev, gpu);
+
err = nouveau_platform_power_up(gpu);
if (err)
return err;
@@ -140,10 +209,9 @@ static int nouveau_platform_probe(struct platform_device *pdev)
err_unref:
drm_dev_unref(drm);
- return 0;
-
power_down:
nouveau_platform_power_down(gpu);
+ nouveau_platform_remove_iommu(&pdev->dev, gpu);
return err;
}
@@ -154,10 +222,15 @@ static int nouveau_platform_remove(struct platform_device *pdev)
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_device *device = nvxx_device(&drm->device);
struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
+ int err;
nouveau_drm_device_remove(drm_dev);
- return nouveau_platform_power_down(gpu);
+ err = nouveau_platform_power_down(gpu);
+
+ nouveau_platform_remove_iommu(&pdev->dev, gpu);
+
+ return err;
}
#if IS_ENABLED(CONFIG_OF)
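The pgshift selection in nouveau_platform_probe_iommu() reduces to: take
PAGE_SHIFT when the IOMMU supports the CPU page size directly, otherwise the
largest supported page size below PAGE_SIZE, and give up if there is none. A
standalone sketch of the same computation, with PAGE_SHIFT assumed to be 12
and fls_ul()/pick_pgshift() as illustrative helpers rather than kernel API:

	#include <stdio.h>

	/* Position of the highest set bit, 1-based, like the kernel's fls(). */
	static int fls_ul(unsigned long v)
	{
		int n = 0;
		while (v) { n++; v >>= 1; }
		return n;
	}

	/* Returns the chosen page shift, or -1 if the IOMMU is unusable. */
	static int pick_pgshift(unsigned long pgsize_bitmap)
	{
		const unsigned long page_size = 1UL << 12;	/* assume PAGE_SHIFT == 12 */

		if (pgsize_bitmap & page_size)
			return 12;
		return fls_ul(pgsize_bitmap & (page_size - 1)) - 1;
	}

	int main(void)
	{
		printf("%d\n", pick_pgshift(0x1000));	/* 12: exact match     */
		printf("%d\n", pick_pgshift(0x0400));	/* 10: 1KiB pages only */
		printf("%d\n", pick_pgshift(0x10000));	/* -1: only larger     */
		return 0;
	}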
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 268bb7213681..392874cf4725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -24,10 +24,12 @@
#define __NOUVEAU_PLATFORM_H__
#include "core/device.h"
+#include "core/mm.h"
struct reset_control;
struct clk;
struct regulator;
+struct iommu_domain;
struct platform_driver;
struct nouveau_platform_gpu {
@@ -36,6 +38,22 @@ struct nouveau_platform_gpu {
struct clk *clk_pwr;
struct regulator *vdd;
+
+ struct {
+ /*
+ * Protects accesses to mm from subsystems
+ */
+ struct mutex mutex;
+
+ struct nvkm_mm _mm;
+ /*
+ * Just points to _mm. We need this to avoid embedding
+	 * struct nvkm_mm in os.h.
+ */
+ struct nvkm_mm *mm;
+ struct iommu_domain *domain;
+ unsigned long pgshift;
+ } iommu;
};
struct nouveau_platform_device {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 273e50110ec3..18f449715788 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -82,6 +82,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
u32 size_nc = 0;
int ret;
+ if (drm->device.info.ram_size == 0)
+ return -ENOMEM;
+
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
size_nc = 1 << nvbo->page_shift;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index bf429cabbaa8..a03db4368696 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -215,6 +215,7 @@ nv84_fence_create(struct nouveau_drm *drm)
{
struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device);
struct nv84_fence_priv *priv;
+ u32 domain;
int ret;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -231,10 +232,17 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
- ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
+	/* Use VRAM if there is any; otherwise fall back to system memory */
+ domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
+ /*
+	 * Fences created in sysmem must be non-cached or we
+ * will lose CPU/GPU coherency!
+ */
+ TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, domain, 0,
+ 0, NULL, NULL, &priv->bo);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, domain, false);
if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 858797453e0b..fa8cda7058cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -1,3 +1,4 @@
nvkm-y += nvkm/engine/ce/gt215.o
nvkm-y += nvkm/engine/ce/gf100.o
nvkm-y += nvkm/engine/ce/gk104.o
+nvkm-y += nvkm/engine/ce/gm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
new file mode 100644
index 000000000000..577eb2eead05
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <engine/ce.h>
+
+#include <core/engctx.h>
+
+struct gm204_ce_priv {
+ struct nvkm_engine base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nvkm_oclass
+gm204_ce_sclass[] = {
+ { 0xb0b5, &nvkm_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCE context
+ ******************************************************************************/
+
+static struct nvkm_ofuncs
+gm204_ce_context_ofuncs = {
+ .ctor = _nvkm_engctx_ctor,
+ .dtor = _nvkm_engctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
+ .rd32 = _nvkm_engctx_rd32,
+ .wr32 = _nvkm_engctx_wr32,
+};
+
+static struct nvkm_oclass
+gm204_ce_cclass = {
+ .handle = NV_ENGCTX(CE0, 0x24),
+ .ofuncs = &gm204_ce_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCE engine/subdev functions
+ ******************************************************************************/
+
+static void
+gm204_ce_intr(struct nvkm_subdev *subdev)
+{
+ const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
+ struct gm204_ce_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
+
+ if (stat) {
+ nv_warn(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+ }
+}
+
+static int
+gm204_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gm204_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE0", "ce0", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = gm204_ce_intr;
+ nv_engine(priv)->cclass = &gm204_ce_cclass;
+ nv_engine(priv)->sclass = gm204_ce_sclass;
+ return 0;
+}
+
+static int
+gm204_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gm204_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE1", "ce1", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = gm204_ce_intr;
+ nv_engine(priv)->cclass = &gm204_ce_cclass;
+ nv_engine(priv)->sclass = gm204_ce_sclass;
+ return 0;
+}
+
+static int
+gm204_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gm204_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE2", "ce2", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00200000;
+ nv_subdev(priv)->intr = gm204_ce_intr;
+ nv_engine(priv)->cclass = &gm204_ce_cclass;
+ nv_engine(priv)->sclass = gm204_ce_sclass;
+ return 0;
+}
+
+struct nvkm_oclass
+gm204_ce0_oclass = {
+ .handle = NV_ENGINE(CE0, 0x24),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_ce0_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
+
+struct nvkm_oclass
+gm204_ce1_oclass = {
+ .handle = NV_ENGINE(CE1, 0x24),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_ce1_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
+
+struct nvkm_oclass
+gm204_ce2_oclass = {
+ .handle = NV_ENGINE(CE2, 0x24),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_ce2_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
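The three ctors differ only in the unit bit that routes each copy engine's
interrupt/enable, while the shared intr handler derives the engine index from
the subdev index and addresses its status register at a 0x1000 stride. A tiny
sketch of that address computation, with the constants taken straight from
gm204_ce_intr() above:

	#include <stdint.h>
	#include <stdio.h>

	/* Per-engine status register: base 0x104908 plus one 0x1000 block per CE. */
	static uint32_t ce_intr_reg(int ce)
	{
		return 0x104908 + (uint32_t)ce * 0x1000;
	}

	int main(void)
	{
		for (int ce = 0; ce < 3; ce++)
			printf("CE%d intr status at 0x%06x\n", ce, ce_intr_reg(ce));
		return 0;
	}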
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 6efa8f38ff54..63d8e52f4b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -139,9 +139,13 @@ nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
args->v0.chipset = device->chipset;
args->v0.revision = device->chiprev;
- if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
- else args->v0.ram_size = args->v0.ram_user = 0;
- if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
+ if (pfb && pfb->ram)
+ args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
+ else
+ args->v0.ram_size = args->v0.ram_user = 0;
+ if (imem && args->v0.ram_size > 0)
+ args->v0.ram_user = args->v0.ram_user - imem->reserved;
+
return 0;
}
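Two failure modes motivate this rework: pfb->ram is NULL on VRAM-less chips
such as GK20A, and subtracting imem->reserved from a zero ram_user would wrap
the unsigned value to a huge bogus number. The resulting arithmetic as a
standalone sketch (field names mirror the nvif args; the sizes are made up):

	#include <stdint.h>
	#include <stdio.h>

	static void fill_ram_info(uint64_t vram, uint64_t reserved,
				  uint64_t *ram_size, uint64_t *ram_user)
	{
		*ram_size = *ram_user = vram;		/* both 0 when no VRAM */
		if (vram > 0)
			*ram_user = vram - reserved;	/* subtract only when non-zero */
	}

	int main(void)
	{
		uint64_t size, user;

		fill_ram_info(2ULL << 30, 1 << 20, &size, &user);
		printf("size=%llu user=%llu\n",
		       (unsigned long long)size, (unsigned long long)user);
		fill_ram_info(0, 1 << 20, &size, &user);	/* GK20A-style */
		printf("size=%llu user=%llu\n",
		       (unsigned long long)size, (unsigned long long)user);
		return 0;
	}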
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
index bf5893458a47..6a9483f65d83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
@@ -171,7 +171,7 @@ gk104_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = gk20a_instmem_oclass;
device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
@@ -202,7 +202,7 @@ gk104_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
@@ -236,7 +236,7 @@ gk104_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 108d048da764..70abf1ec7c98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -127,16 +127,14 @@ gm100_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
-#endif
+ device->oclass[NVDEV_ENGINE_GR ] = gm204_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
+#if 0
device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
@@ -170,16 +168,14 @@ gm100_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
-#endif
+ device->oclass[NVDEV_ENGINE_GR ] = gm206_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
+#if 0
device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 0ebf466e9ef3..9ef6728c528d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -413,8 +413,8 @@ gf110_disp_base_mthd_base = {
static const struct nv50_disp_mthd_list
gf110_disp_base_mthd_image = {
- .mthd = 0x0400,
- .addr = 0x000400,
+ .mthd = 0x0020,
+ .addr = 0x000020,
.data = {
{ 0x0400, 0x661400 },
{ 0x0404, 0x661404 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 84ade810e27c..8ba808df24ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -229,7 +229,7 @@ nv50_disp_dmac_create_(struct nvkm_object *parent,
switch (dmac->pushdma->target) {
case NV_MEM_TARGET_VRAM:
- dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+ dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index c5a2d8718c5b..42891cb71ea3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -9,3 +9,4 @@ nvkm-y += nvkm/engine/fifo/gf100.o
nvkm-y += nvkm/engine/fifo/gk104.o
nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 9585539e59f2..e10f9644140f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -323,8 +323,8 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
return nvkm_fifo_channel_fini(&chan->base, suspend);
}
-static struct nvkm_ofuncs
-gk104_fifo_ofuncs = {
+struct nvkm_ofuncs
+gk104_fifo_chan_ofuncs = {
.ctor = gk104_fifo_chan_ctor,
.dtor = _nvkm_fifo_channel_dtor,
.init = gk104_fifo_chan_init,
@@ -337,7 +337,7 @@ gk104_fifo_ofuncs = {
static struct nvkm_oclass
gk104_fifo_sclass[] = {
- { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_ofuncs },
+ { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
{}
};
@@ -774,6 +774,7 @@ gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
while (object) {
switch (nv_mclass(object)) {
case KEPLER_CHANNEL_GPFIFO_A:
+ case MAXWELL_CHANNEL_GPFIFO_A:
gk104_fifo_recover(priv, engine, (void *)object);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 3046e00ed6ba..318d30d6ee1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -13,4 +13,6 @@ struct gk104_fifo_impl {
struct nvkm_oclass base;
u32 channels;
};
+
+extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
new file mode 100644
index 000000000000..749d525dd8e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+
+#include <nvif/class.h>
+
+static struct nvkm_oclass
+gm204_fifo_sclass[] = {
+ { MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
+ {}
+};
+
+static int
+gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
+ if (ret == 0) {
+ struct gk104_fifo_priv *priv = (void *)*pobject;
+ nv_engine(priv)->sclass = gm204_fifo_sclass;
+ }
+ return ret;
+}
+
+struct nvkm_oclass *
+gm204_fifo_oclass = &(struct gk104_fifo_impl) {
+ .base.handle = NV_ENGINE(FIFO, 0x24),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_fifo_ctor,
+ .dtor = gk104_fifo_dtor,
+ .init = gk104_fifo_init,
+ .fini = _nvkm_fifo_fini,
+ },
+ .channels = 4096,
+}.base;
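gm204_fifo_ctor() shows nvkm's usual subclassing idiom: delegate everything to
the gk104 constructor, then override the one hook that differs (the channel
sclass list). A self-contained sketch of that delegate-then-override pattern,
with all names invented for illustration:

	#include <stdio.h>

	struct engine { const char *sclass; };

	static int base_ctor(struct engine *e)
	{
		e->sclass = "KEPLER_CHANNEL_GPFIFO_A";
		return 0;
	}

	static int derived_ctor(struct engine *e)
	{
		int ret = base_ctor(e);		/* inherit all base setup */
		if (ret == 0)
			e->sclass = "MAXWELL_CHANNEL_GPFIFO_A";	/* override one hook */
		return ret;
	}

	int main(void)
	{
		struct engine e;

		derived_ctor(&e);
		printf("%s\n", e.sclass);
		return 0;
	}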
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 1771d944591b..2e1b92f71d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -12,6 +12,8 @@ nvkm-y += nvkm/engine/gr/ctxgk110.o
nvkm-y += nvkm/engine/gr/ctxgk110b.o
nvkm-y += nvkm/engine/gr/ctxgk208.o
nvkm-y += nvkm/engine/gr/ctxgm107.o
+nvkm-y += nvkm/engine/gr/ctxgm204.o
+nvkm-y += nvkm/engine/gr/ctxgm206.o
nvkm-y += nvkm/engine/gr/nv04.o
nvkm-y += nvkm/engine/gr/nv10.o
nvkm-y += nvkm/engine/gr/nv20.o
@@ -34,3 +36,5 @@ nvkm-y += nvkm/engine/gr/gk110.o
nvkm-y += nvkm/engine/gr/gk110b.o
nvkm-y += nvkm/engine/gr/gk208.o
nvkm-y += nvkm/engine/gr/gm107.o
+nvkm-y += nvkm/engine/gr/gm204.o
+nvkm-y += nvkm/engine/gr/gm206.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 1166b1aa1525..3676a3342bc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -88,11 +88,22 @@ void gk104_grctx_generate_bundle(struct gf100_grctx *);
void gk104_grctx_generate_pagepool(struct gf100_grctx *);
void gk104_grctx_generate_unkn(struct gf100_gr_priv *);
void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *);
+void gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *);
+
extern struct nvkm_oclass *gk110_grctx_oclass;
extern struct nvkm_oclass *gk110b_grctx_oclass;
extern struct nvkm_oclass *gk208_grctx_oclass;
+
extern struct nvkm_oclass *gm107_grctx_oclass;
+void gm107_grctx_generate_bundle(struct gf100_grctx *);
+void gm107_grctx_generate_pagepool(struct gf100_grctx *);
+void gm107_grctx_generate_attrib(struct gf100_grctx *);
+
+extern struct nvkm_oclass *gm204_grctx_oclass;
+void gm204_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+
+extern struct nvkm_oclass *gm206_grctx_oclass;
/* context init value lists */
@@ -196,4 +207,22 @@ extern const struct gf100_gr_init gk208_grctx_init_rstr2d_0[];
extern const struct gf100_gr_init gk208_grctx_init_prop_0[];
extern const struct gf100_gr_init gk208_grctx_init_crstr_0[];
+
+extern const struct gf100_gr_init gm107_grctx_init_gpc_unk_0[];
+extern const struct gf100_gr_init gm107_grctx_init_wwdx_0[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_icmd[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_mthd[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_hub[];
+
+extern const struct gf100_gr_init gm204_grctx_init_prop_0[];
+extern const struct gf100_gr_init gm204_grctx_init_setup_0[];
+extern const struct gf100_gr_init gm204_grctx_init_gpm_0[];
+extern const struct gf100_gr_init gm204_grctx_init_gpc_unk_2[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_tpc[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_ppc[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 5e9454ba158f..b12f6a9fd926 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -941,6 +941,14 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
}
void
+gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+{
+ const u32 fbp_count = nv_rd32(priv, 0x120074);
+ nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+void
gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
{
struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
@@ -970,13 +978,7 @@ gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
- if (priv->gpc_nr == 1) {
- nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
- nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
- } else {
- nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
- nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
- }
+ gk104_grctx_generate_rop_active_fbps(priv);
nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
gf100_gr_icmd(priv, oclass->icmd);
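The old heuristic guessed the crop/zrop count from GPC/TPC numbers; the new
helper reads the actual FBP count from register 0x120074 and masks it into the
low nibble of 0x408850 (zrop) and 0x408958 (crop). nv_mask() is a
read-modify-write over MMIO; a standalone sketch of its semantics, operating
on a plain variable instead of a register:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t reg = 0xdeadbe40;	/* stand-in for one MMIO register */

	/* Read-modify-write, as nouveau's nv_mask() does over MMIO. */
	static uint32_t mask_reg(uint32_t mask, uint32_t data)
	{
		uint32_t old = reg;

		reg = (old & ~mask) | data;
		return old;
	}

	int main(void)
	{
		mask_reg(0x0000000f, 4);	/* program a four-FBP count */
		printf("0x%08x\n", reg);	/* 0xdeadbe44 */
		return 0;
	}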
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index b2fae6e389e2..fbeaae3ae6ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -699,7 +699,7 @@ gm107_grctx_pack_hub[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_grctx_init_gpc_unk_0[] = {
{ 0x418380, 1, 0x04, 0x00000056 },
{}
@@ -834,7 +834,7 @@ gm107_grctx_init_cbm_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_grctx_init_wwdx_0[] = {
{ 0x41bf00, 1, 0x04, 0x0a418820 },
{ 0x41bf04, 1, 0x04, 0x062080e6 },
@@ -860,7 +860,7 @@ gm107_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
+void
gm107_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
@@ -877,7 +877,7 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
-static void
+void
gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
@@ -892,7 +892,7 @@ gm107_grctx_generate_pagepool(struct gf100_grctx *info)
mmio_wr32(info, 0x418e30, 0x80000000); /* guess at it being related */
}
-static void
+void
gm107_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr_priv *priv = info->priv;
@@ -926,7 +926,7 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, o + 0xe4, as);
mmio_wr32(info, o + 0xf8, ao);
ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
- mmio_wr32(info, u, (0x715 /*XXX*/ << 16) | bs);
+ mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
}
}
}
@@ -982,13 +982,7 @@ gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
- if (priv->gpc_nr == 1) {
- nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
- nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
- } else {
- nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
- nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
- }
+ gk104_grctx_generate_rop_active_fbps(priv);
gf100_gr_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
new file mode 100644
index 000000000000..ea8e66151aa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
@@ -0,0 +1,1054 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct gf100_gr_init
+gm204_grctx_init_icmd_0[] = {
+ { 0x001000, 1, 0x01, 0x00000002 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000008 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000374, 1, 0x01, 0x00000100 },
+ { 0x000818, 8, 0x01, 0x00000000 },
+ { 0x000848, 16, 0x01, 0x00000000 },
+ { 0x000738, 1, 0x01, 0x00000000 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000001 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000004 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x0000a9, 1, 0x01, 0x0000ffff },
+ { 0x000038, 1, 0x01, 0x0fac6881 },
+ { 0x00003d, 1, 0x01, 0x00000001 },
+ { 0x0000e8, 8, 0x01, 0x00000400 },
+ { 0x000078, 8, 0x01, 0x00000300 },
+ { 0x000050, 1, 0x01, 0x00000011 },
+ { 0x000058, 8, 0x01, 0x00000008 },
+ { 0x000208, 8, 0x01, 0x00000001 },
+ { 0x000081, 1, 0x01, 0x00000001 },
+ { 0x000085, 1, 0x01, 0x00000004 },
+ { 0x000088, 1, 0x01, 0x00000400 },
+ { 0x000090, 1, 0x01, 0x00000300 },
+ { 0x000098, 1, 0x01, 0x00001001 },
+ { 0x0000e3, 1, 0x01, 0x00000001 },
+ { 0x0000da, 1, 0x01, 0x00000001 },
+ { 0x0000b4, 4, 0x01, 0x88888888 },
+ { 0x0000f8, 1, 0x01, 0x00000003 },
+ { 0x0000fa, 1, 0x01, 0x00000001 },
+ { 0x0000b1, 2, 0x01, 0x00000001 },
+ { 0x00009f, 4, 0x01, 0x0000ffff },
+ { 0x0000a8, 1, 0x01, 0x0000ffff },
+ { 0x0000ad, 1, 0x01, 0x0000013e },
+ { 0x0000e1, 1, 0x01, 0x00000010 },
+ { 0x000290, 16, 0x01, 0x00000000 },
+ { 0x0003b0, 16, 0x01, 0x00000000 },
+ { 0x0002a0, 16, 0x01, 0x00000000 },
+ { 0x000420, 16, 0x01, 0x00000000 },
+ { 0x0002b0, 16, 0x01, 0x00000000 },
+ { 0x000430, 16, 0x01, 0x00000000 },
+ { 0x0002c0, 16, 0x01, 0x00000000 },
+ { 0x0004d0, 16, 0x01, 0x00000000 },
+ { 0x000720, 16, 0x01, 0x00000000 },
+ { 0x0008c0, 16, 0x01, 0x00000000 },
+ { 0x000890, 16, 0x01, 0x00000000 },
+ { 0x0008e0, 16, 0x01, 0x00000000 },
+ { 0x0008a0, 16, 0x01, 0x00000000 },
+ { 0x0008f0, 16, 0x01, 0x00000000 },
+ { 0x00094c, 1, 0x01, 0x000000ff },
+ { 0x00094d, 1, 0x01, 0xffffffff },
+ { 0x00094e, 1, 0x01, 0x00000002 },
+ { 0x0002f2, 2, 0x01, 0x00000001 },
+ { 0x0002f5, 1, 0x01, 0x00000001 },
+ { 0x0002f7, 1, 0x01, 0x00000001 },
+ { 0x000303, 1, 0x01, 0x00000001 },
+ { 0x0002e6, 1, 0x01, 0x00000001 },
+ { 0x000466, 1, 0x01, 0x00000052 },
+ { 0x000301, 1, 0x01, 0x3f800000 },
+ { 0x000304, 1, 0x01, 0x30201000 },
+ { 0x000305, 1, 0x01, 0x70605040 },
+ { 0x000306, 1, 0x01, 0xb8a89888 },
+ { 0x000307, 1, 0x01, 0xf8e8d8c8 },
+ { 0x00030a, 1, 0x01, 0x00ffff00 },
+ { 0x00030b, 1, 0x01, 0x0000001a },
+ { 0x00030c, 1, 0x01, 0x00000001 },
+ { 0x000318, 1, 0x01, 0x00000001 },
+ { 0x000340, 1, 0x01, 0x00000000 },
+ { 0x00037d, 1, 0x01, 0x00000006 },
+ { 0x0003a0, 1, 0x01, 0x00000002 },
+ { 0x0003aa, 1, 0x01, 0x00000001 },
+ { 0x0003a9, 1, 0x01, 0x00000001 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000383, 1, 0x01, 0x00000011 },
+ { 0x000360, 1, 0x01, 0x00000040 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000374, 1, 0x01, 0x00000100 },
+ { 0x00037a, 1, 0x01, 0x00000012 },
+ { 0x000619, 1, 0x01, 0x00000003 },
+ { 0x000811, 1, 0x01, 0x00000003 },
+ { 0x000812, 1, 0x01, 0x00000004 },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000815, 1, 0x01, 0x0000000b },
+ { 0x000800, 6, 0x01, 0x00000001 },
+ { 0x000632, 1, 0x01, 0x00000001 },
+ { 0x000633, 1, 0x01, 0x00000002 },
+ { 0x000634, 1, 0x01, 0x00000003 },
+ { 0x000635, 1, 0x01, 0x00000004 },
+ { 0x000654, 1, 0x01, 0x3f800000 },
+ { 0x000657, 1, 0x01, 0x3f800000 },
+ { 0x000655, 2, 0x01, 0x3f800000 },
+ { 0x0006cd, 1, 0x01, 0x3f800000 },
+ { 0x0007f5, 1, 0x01, 0x3f800000 },
+ { 0x0007dc, 1, 0x01, 0x39291909 },
+ { 0x0007dd, 1, 0x01, 0x79695949 },
+ { 0x0007de, 1, 0x01, 0xb9a99989 },
+ { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007e8, 1, 0x01, 0x00003210 },
+ { 0x0007e9, 1, 0x01, 0x00007654 },
+ { 0x0007ea, 1, 0x01, 0x00000098 },
+ { 0x0007ec, 1, 0x01, 0x39291909 },
+ { 0x0007ed, 1, 0x01, 0x79695949 },
+ { 0x0007ee, 1, 0x01, 0xb9a99989 },
+ { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007f0, 1, 0x01, 0x00003210 },
+ { 0x0007f1, 1, 0x01, 0x00007654 },
+ { 0x0007f2, 1, 0x01, 0x00000098 },
+ { 0x0005a5, 1, 0x01, 0x00000001 },
+ { 0x0005aa, 1, 0x01, 0x00000002 },
+ { 0x0005cb, 1, 0x01, 0x00000004 },
+ { 0x0005d0, 1, 0x01, 0x20181008 },
+ { 0x0005d1, 1, 0x01, 0x40383028 },
+ { 0x0005d2, 1, 0x01, 0x60585048 },
+ { 0x0005d3, 1, 0x01, 0x80787068 },
+ { 0x000980, 128, 0x01, 0x00000000 },
+ { 0x000468, 1, 0x01, 0x00000004 },
+ { 0x00046c, 1, 0x01, 0x00000001 },
+ { 0x000470, 96, 0x01, 0x00000000 },
+ { 0x0005e0, 16, 0x01, 0x00000d10 },
+ { 0x000510, 16, 0x01, 0x3f800000 },
+ { 0x000520, 1, 0x01, 0x000002b6 },
+ { 0x000529, 1, 0x01, 0x00000001 },
+ { 0x000530, 16, 0x01, 0xffff0000 },
+ { 0x000550, 32, 0x01, 0xffff0000 },
+ { 0x000585, 1, 0x01, 0x0000003f },
+ { 0x000576, 1, 0x01, 0x00000003 },
+ { 0x00057b, 1, 0x01, 0x00000059 },
+ { 0x000586, 1, 0x01, 0x00000040 },
+ { 0x000582, 2, 0x01, 0x00000080 },
+ { 0x000595, 1, 0x01, 0x00400040 },
+ { 0x000596, 1, 0x01, 0x00000492 },
+ { 0x000597, 1, 0x01, 0x08080203 },
+ { 0x0005ad, 1, 0x01, 0x00000008 },
+ { 0x000598, 1, 0x01, 0x00020001 },
+ { 0x0005d4, 1, 0x01, 0x00000001 },
+ { 0x0005c2, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
+ { 0x00063a, 1, 0x01, 0x00000002 },
+ { 0x00063b, 2, 0x01, 0x00000001 },
+ { 0x00063d, 1, 0x01, 0x00000002 },
+ { 0x00063e, 1, 0x01, 0x00000001 },
+ { 0x0008b8, 8, 0x01, 0x00000001 },
+ { 0x000900, 8, 0x01, 0x00000001 },
+ { 0x000908, 8, 0x01, 0x00000002 },
+ { 0x000910, 16, 0x01, 0x00000001 },
+ { 0x000920, 8, 0x01, 0x00000002 },
+ { 0x000928, 8, 0x01, 0x00000001 },
+ { 0x000662, 1, 0x01, 0x00000001 },
+ { 0x000648, 9, 0x01, 0x00000001 },
+ { 0x000674, 1, 0x01, 0x00000001 },
+ { 0x000658, 1, 0x01, 0x0000000f },
+ { 0x0007ff, 1, 0x01, 0x0000000a },
+ { 0x00066a, 1, 0x01, 0x40000000 },
+ { 0x00066b, 1, 0x01, 0x10000000 },
+ { 0x00066c, 2, 0x01, 0xffff0000 },
+ { 0x0007af, 2, 0x01, 0x00000008 },
+ { 0x0007f6, 1, 0x01, 0x00000001 },
+ { 0x0006b2, 1, 0x01, 0x00000055 },
+ { 0x0007ad, 1, 0x01, 0x00000003 },
+ { 0x000971, 1, 0x01, 0x00000008 },
+ { 0x000972, 1, 0x01, 0x00000040 },
+ { 0x000973, 1, 0x01, 0x0000012c },
+ { 0x00097c, 1, 0x01, 0x00000040 },
+ { 0x000975, 1, 0x01, 0x00000020 },
+ { 0x000976, 1, 0x01, 0x00000001 },
+ { 0x000977, 1, 0x01, 0x00000020 },
+ { 0x000978, 1, 0x01, 0x00000001 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x00095e, 1, 0x01, 0x20164010 },
+ { 0x00095f, 1, 0x01, 0x00000020 },
+ { 0x000a0d, 1, 0x01, 0x00000006 },
+ { 0x00097d, 1, 0x01, 0x0000000c },
+ { 0x000683, 1, 0x01, 0x00000006 },
+ { 0x000687, 1, 0x01, 0x003fffff },
+ { 0x0006a0, 1, 0x01, 0x00000005 },
+ { 0x000840, 1, 0x01, 0x00400008 },
+ { 0x000841, 1, 0x01, 0x08000080 },
+ { 0x000842, 1, 0x01, 0x00400008 },
+ { 0x000843, 1, 0x01, 0x08000080 },
+ { 0x000818, 8, 0x01, 0x00000000 },
+ { 0x000848, 16, 0x01, 0x00000000 },
+ { 0x000738, 1, 0x01, 0x00000000 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ab, 1, 0x01, 0x00000002 },
+ { 0x0006ac, 1, 0x01, 0x00000080 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x0006bb, 1, 0x01, 0x000000cf },
+ { 0x0006ce, 1, 0x01, 0x2a712488 },
+ { 0x000739, 1, 0x01, 0x4085c000 },
+ { 0x00073a, 1, 0x01, 0x00000080 },
+ { 0x000786, 1, 0x01, 0x80000100 },
+ { 0x00073c, 1, 0x01, 0x00010100 },
+ { 0x00073d, 1, 0x01, 0x02800000 },
+ { 0x000787, 1, 0x01, 0x000000cf },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x000836, 1, 0x01, 0x00000001 },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x000833, 1, 0x01, 0x04444480 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c1b0, 8, 0x01, 0x0000000f },
+ { 0x00c1b8, 1, 0x01, 0x0fac6881 },
+ { 0x00c1b9, 1, 0x01, 0x00fac688 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c413, 4, 0x01, 0x88888888 },
+ { 0x00c423, 1, 0x01, 0x0000ff00 },
+ { 0x00c420, 1, 0x01, 0x00880101 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_icmd[] = {
+ { gm204_grctx_init_icmd_0 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_b197_0[] = {
+ { 0x000800, 8, 0x40, 0x00000000 },
+ { 0x000804, 8, 0x40, 0x00000000 },
+ { 0x000808, 8, 0x40, 0x00000400 },
+ { 0x00080c, 8, 0x40, 0x00000300 },
+ { 0x000810, 1, 0x04, 0x000000cf },
+ { 0x000850, 7, 0x40, 0x00000000 },
+ { 0x000814, 8, 0x40, 0x00000040 },
+ { 0x000818, 8, 0x40, 0x00000001 },
+ { 0x00081c, 8, 0x40, 0x00000000 },
+ { 0x000820, 8, 0x40, 0x00000000 },
+ { 0x001c00, 16, 0x10, 0x00000000 },
+ { 0x001c04, 16, 0x10, 0x00000000 },
+ { 0x001c08, 16, 0x10, 0x00000000 },
+ { 0x001c0c, 16, 0x10, 0x00000000 },
+ { 0x001d00, 16, 0x10, 0x00000000 },
+ { 0x001d04, 16, 0x10, 0x00000000 },
+ { 0x001d08, 16, 0x10, 0x00000000 },
+ { 0x001d0c, 16, 0x10, 0x00000000 },
+ { 0x001f00, 16, 0x08, 0x00000000 },
+ { 0x001f04, 16, 0x08, 0x00000000 },
+ { 0x001f80, 16, 0x08, 0x00000000 },
+ { 0x001f84, 16, 0x08, 0x00000000 },
+ { 0x002000, 1, 0x04, 0x00000000 },
+ { 0x002040, 1, 0x04, 0x00000011 },
+ { 0x002080, 1, 0x04, 0x00000020 },
+ { 0x0020c0, 1, 0x04, 0x00000030 },
+ { 0x002100, 1, 0x04, 0x00000040 },
+ { 0x002140, 1, 0x04, 0x00000051 },
+ { 0x00200c, 6, 0x40, 0x00000001 },
+ { 0x002010, 1, 0x04, 0x00000000 },
+ { 0x002050, 1, 0x04, 0x00000000 },
+ { 0x002090, 1, 0x04, 0x00000001 },
+ { 0x0020d0, 1, 0x04, 0x00000002 },
+ { 0x002110, 1, 0x04, 0x00000003 },
+ { 0x002150, 1, 0x04, 0x00000004 },
+ { 0x000380, 4, 0x20, 0x00000000 },
+ { 0x000384, 4, 0x20, 0x00000000 },
+ { 0x000388, 4, 0x20, 0x00000000 },
+ { 0x00038c, 4, 0x20, 0x00000000 },
+ { 0x000700, 4, 0x10, 0x00000000 },
+ { 0x000704, 4, 0x10, 0x00000000 },
+ { 0x000708, 4, 0x10, 0x00000000 },
+ { 0x002800, 128, 0x04, 0x00000000 },
+ { 0x000a00, 16, 0x20, 0x00000000 },
+ { 0x000a04, 16, 0x20, 0x00000000 },
+ { 0x000a08, 16, 0x20, 0x00000000 },
+ { 0x000a0c, 16, 0x20, 0x00000000 },
+ { 0x000a10, 16, 0x20, 0x00000000 },
+ { 0x000a14, 16, 0x20, 0x00000000 },
+ { 0x000a18, 16, 0x20, 0x00006420 },
+ { 0x000a1c, 16, 0x20, 0x00000000 },
+ { 0x000c00, 16, 0x10, 0x00000000 },
+ { 0x000c04, 16, 0x10, 0x00000000 },
+ { 0x000c08, 16, 0x10, 0x00000000 },
+ { 0x000c0c, 16, 0x10, 0x3f800000 },
+ { 0x000d00, 8, 0x08, 0xffff0000 },
+ { 0x000d04, 8, 0x08, 0xffff0000 },
+ { 0x000e00, 16, 0x10, 0x00000000 },
+ { 0x000e04, 16, 0x10, 0xffff0000 },
+ { 0x000e08, 16, 0x10, 0xffff0000 },
+ { 0x000d40, 4, 0x08, 0x00000000 },
+ { 0x000d44, 4, 0x08, 0x00000000 },
+ { 0x001e00, 8, 0x20, 0x00000001 },
+ { 0x001e04, 8, 0x20, 0x00000001 },
+ { 0x001e08, 8, 0x20, 0x00000002 },
+ { 0x001e0c, 8, 0x20, 0x00000001 },
+ { 0x001e10, 8, 0x20, 0x00000001 },
+ { 0x001e14, 8, 0x20, 0x00000002 },
+ { 0x001e18, 8, 0x20, 0x00000001 },
+ { 0x001480, 8, 0x10, 0x00000000 },
+ { 0x001484, 8, 0x10, 0x00000000 },
+ { 0x001488, 8, 0x10, 0x00000000 },
+ { 0x003400, 128, 0x04, 0x00000000 },
+ { 0x00030c, 1, 0x04, 0x00000001 },
+ { 0x001944, 1, 0x04, 0x00000000 },
+ { 0x001514, 1, 0x04, 0x00000000 },
+ { 0x000d68, 1, 0x04, 0x0000ffff },
+ { 0x00121c, 1, 0x04, 0x0fac6881 },
+ { 0x000fac, 1, 0x04, 0x00000001 },
+ { 0x001538, 1, 0x04, 0x00000001 },
+ { 0x000fe0, 2, 0x04, 0x00000000 },
+ { 0x000fe8, 1, 0x04, 0x00000014 },
+ { 0x000fec, 1, 0x04, 0x00000040 },
+ { 0x000ff0, 1, 0x04, 0x00000000 },
+ { 0x00179c, 1, 0x04, 0x00000000 },
+ { 0x001228, 1, 0x04, 0x00000400 },
+ { 0x00122c, 1, 0x04, 0x00000300 },
+ { 0x001230, 1, 0x04, 0x00010001 },
+ { 0x0007f8, 1, 0x04, 0x00000000 },
+ { 0x001208, 1, 0x04, 0x00000000 },
+ { 0x0015b4, 1, 0x04, 0x00000001 },
+ { 0x0015cc, 1, 0x04, 0x00000000 },
+ { 0x001534, 1, 0x04, 0x00000000 },
+ { 0x000754, 1, 0x04, 0x00000001 },
+ { 0x000fb0, 1, 0x04, 0x00000000 },
+ { 0x0015d0, 1, 0x04, 0x00000000 },
+ { 0x0011e0, 4, 0x04, 0x88888888 },
+ { 0x00153c, 1, 0x04, 0x00000000 },
+ { 0x0016b4, 1, 0x04, 0x00000003 },
+ { 0x000fa4, 1, 0x04, 0x00000001 },
+ { 0x000fbc, 4, 0x04, 0x0000ffff },
+ { 0x000fa8, 1, 0x04, 0x0000ffff },
+ { 0x000df8, 2, 0x04, 0x00000000 },
+ { 0x001948, 1, 0x04, 0x00000000 },
+ { 0x001970, 1, 0x04, 0x00000001 },
+ { 0x00161c, 1, 0x04, 0x000009f0 },
+ { 0x000dcc, 1, 0x04, 0x00000010 },
+ { 0x0015e4, 1, 0x04, 0x00000000 },
+ { 0x001160, 32, 0x04, 0x25e00040 },
+ { 0x001880, 32, 0x04, 0x00000000 },
+ { 0x000f84, 2, 0x04, 0x00000000 },
+ { 0x0017c8, 2, 0x04, 0x00000000 },
+ { 0x0017d0, 1, 0x04, 0x000000ff },
+ { 0x0017d4, 1, 0x04, 0xffffffff },
+ { 0x0017d8, 1, 0x04, 0x00000002 },
+ { 0x0017dc, 1, 0x04, 0x00000000 },
+ { 0x0015f4, 2, 0x04, 0x00000000 },
+ { 0x001434, 2, 0x04, 0x00000000 },
+ { 0x000d74, 1, 0x04, 0x00000000 },
+ { 0x0013a4, 1, 0x04, 0x00000000 },
+ { 0x001318, 1, 0x04, 0x00000001 },
+ { 0x001080, 2, 0x04, 0x00000000 },
+ { 0x001088, 2, 0x04, 0x00000001 },
+ { 0x001090, 1, 0x04, 0x00000000 },
+ { 0x001094, 1, 0x04, 0x00000001 },
+ { 0x001098, 1, 0x04, 0x00000000 },
+ { 0x00109c, 1, 0x04, 0x00000001 },
+ { 0x0010a0, 2, 0x04, 0x00000000 },
+ { 0x001644, 1, 0x04, 0x00000000 },
+ { 0x000748, 1, 0x04, 0x00000000 },
+ { 0x000de8, 1, 0x04, 0x00000000 },
+ { 0x001648, 1, 0x04, 0x00000000 },
+ { 0x0012a4, 1, 0x04, 0x00000000 },
+ { 0x001120, 4, 0x04, 0x00000000 },
+ { 0x001118, 1, 0x04, 0x00000000 },
+ { 0x00164c, 1, 0x04, 0x00000000 },
+ { 0x001658, 1, 0x04, 0x00000000 },
+ { 0x001910, 1, 0x04, 0x00000290 },
+ { 0x001518, 1, 0x04, 0x00000000 },
+ { 0x00165c, 1, 0x04, 0x00000001 },
+ { 0x001520, 1, 0x04, 0x00000000 },
+ { 0x001604, 1, 0x04, 0x00000000 },
+ { 0x001570, 1, 0x04, 0x00000000 },
+ { 0x0013b0, 2, 0x04, 0x3f800000 },
+ { 0x00020c, 1, 0x04, 0x00000000 },
+ { 0x001670, 1, 0x04, 0x30201000 },
+ { 0x001674, 1, 0x04, 0x70605040 },
+ { 0x001678, 1, 0x04, 0xb8a89888 },
+ { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
+ { 0x00166c, 1, 0x04, 0x00000000 },
+ { 0x001680, 1, 0x04, 0x00ffff00 },
+ { 0x0012d0, 1, 0x04, 0x00000003 },
+ { 0x00113c, 1, 0x04, 0x00000000 },
+ { 0x0012d4, 1, 0x04, 0x00000002 },
+ { 0x001684, 2, 0x04, 0x00000000 },
+ { 0x000dac, 2, 0x04, 0x00001b02 },
+ { 0x000db4, 1, 0x04, 0x00000000 },
+ { 0x00168c, 1, 0x04, 0x00000000 },
+ { 0x0015bc, 1, 0x04, 0x00000000 },
+ { 0x00156c, 1, 0x04, 0x00000000 },
+ { 0x00187c, 1, 0x04, 0x00000000 },
+ { 0x001110, 1, 0x04, 0x00000001 },
+ { 0x000dc0, 3, 0x04, 0x00000000 },
+ { 0x000f40, 5, 0x04, 0x00000000 },
+ { 0x001234, 1, 0x04, 0x00000000 },
+ { 0x001690, 1, 0x04, 0x00000000 },
+ { 0x000790, 5, 0x04, 0x00000000 },
+ { 0x00077c, 1, 0x04, 0x00000000 },
+ { 0x001000, 1, 0x04, 0x00000010 },
+ { 0x0010fc, 1, 0x04, 0x00000000 },
+ { 0x001290, 1, 0x04, 0x00000000 },
+ { 0x000218, 1, 0x04, 0x00000010 },
+ { 0x0012d8, 1, 0x04, 0x00000000 },
+ { 0x0012dc, 1, 0x04, 0x00000010 },
+ { 0x000d94, 1, 0x04, 0x00000001 },
+ { 0x00155c, 2, 0x04, 0x00000000 },
+ { 0x001564, 1, 0x04, 0x00000fff },
+ { 0x001574, 2, 0x04, 0x00000000 },
+ { 0x00157c, 1, 0x04, 0x000fffff },
+ { 0x001354, 1, 0x04, 0x00000000 },
+ { 0x001610, 1, 0x04, 0x00000012 },
+ { 0x001608, 2, 0x04, 0x00000000 },
+ { 0x00260c, 1, 0x04, 0x00000000 },
+ { 0x0007ac, 1, 0x04, 0x00000000 },
+ { 0x00162c, 1, 0x04, 0x00000003 },
+ { 0x000210, 1, 0x04, 0x00000000 },
+ { 0x000320, 1, 0x04, 0x00000000 },
+ { 0x000324, 6, 0x04, 0x3f800000 },
+ { 0x000750, 1, 0x04, 0x00000000 },
+ { 0x000760, 1, 0x04, 0x39291909 },
+ { 0x000764, 1, 0x04, 0x79695949 },
+ { 0x000768, 1, 0x04, 0xb9a99989 },
+ { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x000770, 1, 0x04, 0x30201000 },
+ { 0x000774, 1, 0x04, 0x70605040 },
+ { 0x000778, 1, 0x04, 0x00009080 },
+ { 0x000780, 1, 0x04, 0x39291909 },
+ { 0x000784, 1, 0x04, 0x79695949 },
+ { 0x000788, 1, 0x04, 0xb9a99989 },
+ { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x0007d0, 1, 0x04, 0x30201000 },
+ { 0x0007d4, 1, 0x04, 0x70605040 },
+ { 0x0007d8, 1, 0x04, 0x00009080 },
+ { 0x001004, 1, 0x04, 0x00000000 },
+ { 0x001240, 8, 0x04, 0x00000000 },
+ { 0x00037c, 1, 0x04, 0x00000001 },
+ { 0x000740, 1, 0x04, 0x00000000 },
+ { 0x001148, 1, 0x04, 0x00000000 },
+ { 0x000fb4, 1, 0x04, 0x00000000 },
+ { 0x000fb8, 1, 0x04, 0x00000002 },
+ { 0x001130, 1, 0x04, 0x00000002 },
+ { 0x000fd4, 2, 0x04, 0x00000000 },
+ { 0x001030, 1, 0x04, 0x20181008 },
+ { 0x001034, 1, 0x04, 0x40383028 },
+ { 0x001038, 1, 0x04, 0x60585048 },
+ { 0x00103c, 1, 0x04, 0x80787068 },
+ { 0x000744, 1, 0x04, 0x00000000 },
+ { 0x002600, 1, 0x04, 0x00000000 },
+ { 0x001918, 1, 0x04, 0x00000000 },
+ { 0x00191c, 1, 0x04, 0x00000900 },
+ { 0x001920, 1, 0x04, 0x00000405 },
+ { 0x001308, 1, 0x04, 0x00000001 },
+ { 0x001924, 1, 0x04, 0x00000000 },
+ { 0x0013ac, 1, 0x04, 0x00000000 },
+ { 0x00192c, 1, 0x04, 0x00000001 },
+ { 0x00193c, 1, 0x04, 0x00002c1c },
+ { 0x000d7c, 1, 0x04, 0x00000000 },
+ { 0x000f8c, 1, 0x04, 0x00000000 },
+ { 0x0002c0, 1, 0x04, 0x00000001 },
+ { 0x001510, 1, 0x04, 0x00000000 },
+ { 0x001940, 1, 0x04, 0x00000000 },
+ { 0x000ff4, 2, 0x04, 0x00000000 },
+ { 0x00194c, 2, 0x04, 0x00000000 },
+ { 0x001968, 1, 0x04, 0x00000000 },
+ { 0x001590, 1, 0x04, 0x0000003f },
+ { 0x0007e8, 4, 0x04, 0x00000000 },
+ { 0x00196c, 1, 0x04, 0x00000011 },
+ { 0x0002e4, 1, 0x04, 0x0000b001 },
+ { 0x00036c, 2, 0x04, 0x00000000 },
+ { 0x00197c, 1, 0x04, 0x00000000 },
+ { 0x000fcc, 2, 0x04, 0x00000000 },
+ { 0x0002d8, 1, 0x04, 0x00000040 },
+ { 0x001980, 1, 0x04, 0x00000080 },
+ { 0x001504, 1, 0x04, 0x00000080 },
+ { 0x001984, 1, 0x04, 0x00000000 },
+ { 0x000f60, 1, 0x04, 0x00000000 },
+ { 0x000f64, 1, 0x04, 0x00400040 },
+ { 0x000f68, 1, 0x04, 0x00002212 },
+ { 0x000f6c, 1, 0x04, 0x08080203 },
+ { 0x001108, 1, 0x04, 0x00000008 },
+ { 0x000f70, 1, 0x04, 0x00080001 },
+ { 0x000ffc, 1, 0x04, 0x00000000 },
+ { 0x001134, 1, 0x04, 0x00000000 },
+ { 0x000f1c, 1, 0x04, 0x00000000 },
+ { 0x0011f8, 1, 0x04, 0x00000000 },
+ { 0x001138, 1, 0x04, 0x00000001 },
+ { 0x000300, 1, 0x04, 0x00000001 },
+ { 0x0013a8, 1, 0x04, 0x00000000 },
+ { 0x001224, 1, 0x04, 0x00000000 },
+ { 0x0012ec, 1, 0x04, 0x00000000 },
+ { 0x001310, 1, 0x04, 0x00000000 },
+ { 0x001314, 1, 0x04, 0x00000001 },
+ { 0x001380, 1, 0x04, 0x00000000 },
+ { 0x001384, 4, 0x04, 0x00000001 },
+ { 0x001394, 1, 0x04, 0x00000000 },
+ { 0x00139c, 1, 0x04, 0x00000000 },
+ { 0x001398, 1, 0x04, 0x00000000 },
+ { 0x001594, 1, 0x04, 0x00000000 },
+ { 0x001598, 4, 0x04, 0x00000001 },
+ { 0x000f54, 3, 0x04, 0x00000000 },
+ { 0x0019bc, 1, 0x04, 0x00000000 },
+ { 0x000f9c, 2, 0x04, 0x00000000 },
+ { 0x0012cc, 1, 0x04, 0x00000000 },
+ { 0x0012e8, 1, 0x04, 0x00000000 },
+ { 0x00130c, 1, 0x04, 0x00000001 },
+ { 0x001360, 8, 0x04, 0x00000000 },
+ { 0x00133c, 2, 0x04, 0x00000001 },
+ { 0x001344, 1, 0x04, 0x00000002 },
+ { 0x001348, 2, 0x04, 0x00000001 },
+ { 0x001350, 1, 0x04, 0x00000002 },
+ { 0x001358, 1, 0x04, 0x00000001 },
+ { 0x0012e4, 1, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
+ { 0x0019c0, 1, 0x04, 0x00000000 },
+ { 0x001140, 1, 0x04, 0x00000000 },
+ { 0x000dd0, 1, 0x04, 0x00000000 },
+ { 0x000dd4, 1, 0x04, 0x00000001 },
+ { 0x0002f4, 1, 0x04, 0x00000000 },
+ { 0x0019c4, 1, 0x04, 0x00000000 },
+ { 0x0019c8, 1, 0x04, 0x00001500 },
+ { 0x00135c, 1, 0x04, 0x00000000 },
+ { 0x000f90, 1, 0x04, 0x00000000 },
+ { 0x0019e0, 8, 0x04, 0x00000001 },
+ { 0x0019cc, 1, 0x04, 0x00000001 },
+ { 0x00111c, 1, 0x04, 0x00000001 },
+ { 0x0015b8, 1, 0x04, 0x00000000 },
+ { 0x001a00, 1, 0x04, 0x00001111 },
+ { 0x001a04, 7, 0x04, 0x00000000 },
+ { 0x000d6c, 2, 0x04, 0xffff0000 },
+ { 0x0010f8, 1, 0x04, 0x00001010 },
+ { 0x000d80, 5, 0x04, 0x00000000 },
+ { 0x000da0, 1, 0x04, 0x00000000 },
+ { 0x0007a4, 2, 0x04, 0x00000000 },
+ { 0x001508, 1, 0x04, 0x80000000 },
+ { 0x00150c, 1, 0x04, 0x40000000 },
+ { 0x001668, 1, 0x04, 0x00000000 },
+ { 0x000318, 2, 0x04, 0x00000008 },
+ { 0x000d9c, 1, 0x04, 0x00000001 },
+ { 0x000f14, 1, 0x04, 0x00000000 },
+ { 0x000374, 1, 0x04, 0x00000000 },
+ { 0x000378, 1, 0x04, 0x0000000c },
+ { 0x0007dc, 1, 0x04, 0x00000000 },
+ { 0x00074c, 1, 0x04, 0x00000055 },
+ { 0x001420, 1, 0x04, 0x00000003 },
+ { 0x001008, 1, 0x04, 0x00000008 },
+ { 0x00100c, 1, 0x04, 0x00000040 },
+ { 0x001010, 1, 0x04, 0x0000012c },
+ { 0x000d60, 1, 0x04, 0x00000040 },
+ { 0x001018, 1, 0x04, 0x00000020 },
+ { 0x00101c, 1, 0x04, 0x00000001 },
+ { 0x001020, 1, 0x04, 0x00000020 },
+ { 0x001024, 1, 0x04, 0x00000001 },
+ { 0x001444, 3, 0x04, 0x00000000 },
+ { 0x000360, 1, 0x04, 0x20164010 },
+ { 0x000364, 1, 0x04, 0x00000020 },
+ { 0x000368, 1, 0x04, 0x00000000 },
+ { 0x000da8, 1, 0x04, 0x00000030 },
+ { 0x000de4, 1, 0x04, 0x00000000 },
+ { 0x000204, 1, 0x04, 0x00000006 },
+ { 0x0002d0, 1, 0x04, 0x003fffff },
+ { 0x001220, 1, 0x04, 0x00000005 },
+ { 0x000fdc, 1, 0x04, 0x00000000 },
+ { 0x000f98, 1, 0x04, 0x00400008 },
+ { 0x001284, 1, 0x04, 0x08000080 },
+ { 0x001450, 1, 0x04, 0x00400008 },
+ { 0x001454, 1, 0x04, 0x08000080 },
+ { 0x000214, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_mthd[] = {
+ { gm204_grctx_init_b197_0, 0xb197 },
+ { gf100_grctx_init_902d_0, 0x902d },
+ {}
+};
+
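+/* Each gf100_gr_init entry is { addr, count, pitch, data }: "data" is
+ * written to "count" registers starting at "addr", stepping by "pitch"
+ * bytes between writes.
+ */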
+static const struct gf100_gr_init
+gm204_grctx_init_fe_0[] = {
+ { 0x404004, 8, 0x04, 0x00000000 },
+ { 0x404024, 1, 0x04, 0x0000e000 },
+ { 0x404028, 8, 0x04, 0x00000000 },
+ { 0x4040a8, 8, 0x04, 0x00000000 },
+ { 0x4040c8, 1, 0x04, 0xf801008f },
+ { 0x4040d0, 6, 0x04, 0x00000000 },
+ { 0x4040f8, 1, 0x04, 0x00000000 },
+ { 0x404100, 10, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
+ { 0x404150, 1, 0x04, 0x0000002e },
+ { 0x404154, 2, 0x04, 0x00000800 },
+ { 0x404164, 1, 0x04, 0x00000045 },
+ { 0x40417c, 2, 0x04, 0x00000000 },
+ { 0x404194, 1, 0x04, 0x33000700 },
+ { 0x4041a0, 4, 0x04, 0x00000000 },
+ { 0x4041c4, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_ds_0[] = {
+ { 0x405800, 1, 0x04, 0x8f8001bf },
+ { 0x405830, 1, 0x04, 0x04001000 },
+ { 0x405834, 1, 0x04, 0x08000000 },
+ { 0x405838, 1, 0x04, 0x00010000 },
+ { 0x405854, 1, 0x04, 0x00000000 },
+ { 0x405870, 4, 0x04, 0x00000001 },
+ { 0x405a00, 2, 0x04, 0x00000000 },
+ { 0x405a18, 1, 0x04, 0x00000000 },
+ { 0x405a1c, 1, 0x04, 0x000000ff },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_cwd_0[] = {
+ { 0x405b00, 1, 0x04, 0x00000000 },
+ { 0x405b10, 1, 0x04, 0x00001000 },
+ { 0x405b20, 1, 0x04, 0x04000000 },
+ { 0x405b60, 6, 0x04, 0x00000000 },
+ { 0x405ba0, 6, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_pd_0[] = {
+ { 0x406020, 1, 0x04, 0x17410001 },
+ { 0x406028, 4, 0x04, 0x00000001 },
+ { 0x4064a8, 1, 0x04, 0x00000000 },
+ { 0x4064ac, 1, 0x04, 0x00003fff },
+ { 0x4064b0, 3, 0x04, 0x00000000 },
+ { 0x4064c0, 1, 0x04, 0x80400280 },
+ { 0x4064c4, 1, 0x04, 0x0400ffff },
+ { 0x4064c8, 1, 0x04, 0x01800780 },
+ { 0x4064cc, 9, 0x04, 0x00000000 },
+ { 0x4064fc, 1, 0x04, 0x0000022a },
+ { 0x406500, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_be_0[] = {
+ { 0x408800, 1, 0x04, 0x32882a3c },
+ { 0x408804, 1, 0x04, 0x00000040 },
+ { 0x408808, 1, 0x04, 0x1003e005 },
+ { 0x408840, 1, 0x04, 0x00000e0b },
+ { 0x408900, 1, 0x04, 0xb080b801 },
+ { 0x408904, 1, 0x04, 0x63038001 },
+ { 0x408908, 1, 0x04, 0x12c8502f },
+ { 0x408980, 1, 0x04, 0x0000011d },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gm204_grctx_init_fe_0 },
+ { gk110_grctx_init_pri_0 },
+ { gk104_grctx_init_memfmt_0 },
+ { gm204_grctx_init_ds_0 },
+ { gm204_grctx_init_cwd_0 },
+ { gm204_grctx_init_pd_0 },
+ { gk208_grctx_init_rstr2d_0 },
+ { gk104_grctx_init_scc_0 },
+ { gm204_grctx_init_be_0 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_prop_0[] = {
+ { 0x418400, 1, 0x04, 0x38e01e00 },
+ { 0x418404, 1, 0x04, 0x70001fff },
+ { 0x41840c, 1, 0x04, 0x20001008 },
+ { 0x418410, 2, 0x04, 0x0fff0fff },
+ { 0x418418, 1, 0x04, 0x07ff07ff },
+ { 0x41841c, 1, 0x04, 0x3feffbff },
+ { 0x418450, 6, 0x04, 0x00000000 },
+ { 0x418468, 1, 0x04, 0x00000001 },
+ { 0x41846c, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_gpc_unk_1[] = {
+ { 0x418600, 1, 0x04, 0x0000007f },
+ { 0x418684, 1, 0x04, 0x0000001f },
+ { 0x418700, 1, 0x04, 0x00000002 },
+ { 0x418704, 1, 0x04, 0x00000080 },
+ { 0x418708, 1, 0x04, 0x40000000 },
+ { 0x41870c, 2, 0x04, 0x00000000 },
+ { 0x418728, 1, 0x04, 0x00010000 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_setup_0[] = {
+ { 0x418800, 1, 0x04, 0x7006863a },
+ { 0x418808, 1, 0x04, 0x00000000 },
+ { 0x418810, 1, 0x04, 0x00000000 },
+ { 0x418828, 1, 0x04, 0x00000044 },
+ { 0x418830, 1, 0x04, 0x10000001 },
+ { 0x4188d8, 1, 0x04, 0x00000008 },
+ { 0x4188e0, 1, 0x04, 0x01000000 },
+ { 0x4188e8, 5, 0x04, 0x00000000 },
+ { 0x4188fc, 1, 0x04, 0x20100058 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_gpm_0[] = {
+ { 0x418c10, 8, 0x04, 0x00000000 },
+ { 0x418c40, 1, 0x04, 0xffffffff },
+ { 0x418c6c, 1, 0x04, 0x00000001 },
+ { 0x418c80, 1, 0x04, 0x20200000 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_gpc_unk_2[] = {
+ { 0x418e00, 1, 0x04, 0x90040000 },
+ { 0x418e24, 1, 0x04, 0x00000000 },
+ { 0x418e28, 1, 0x04, 0x00000030 },
+ { 0x418e2c, 1, 0x04, 0x00000100 },
+ { 0x418e30, 3, 0x04, 0x00000000 },
+ { 0x418e40, 22, 0x04, 0x00000000 },
+ { 0x418ea0, 12, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_pack
+gm204_grctx_pack_gpc[] = {
+ { gm107_grctx_init_gpc_unk_0 },
+ { gm204_grctx_init_prop_0 },
+ { gm204_grctx_init_gpc_unk_1 },
+ { gm204_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gk208_grctx_init_crstr_0 },
+ { gm204_grctx_init_gpm_0 },
+ { gm204_grctx_init_gpc_unk_2 },
+ { gf100_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_pe_0[] = {
+ { 0x419848, 1, 0x04, 0x00000000 },
+ { 0x419864, 1, 0x04, 0x00000029 },
+ { 0x419888, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_tex_0[] = {
+ { 0x419a00, 1, 0x04, 0x000100f0 },
+ { 0x419a04, 1, 0x04, 0x00000005 },
+ { 0x419a08, 1, 0x04, 0x00000621 },
+ { 0x419a0c, 1, 0x04, 0x00320000 },
+ { 0x419a10, 1, 0x04, 0x00000000 },
+ { 0x419a14, 1, 0x04, 0x00000200 },
+ { 0x419a1c, 1, 0x04, 0x0010c000 },
+ { 0x419a20, 1, 0x04, 0x20008a00 },
+ { 0x419a30, 1, 0x04, 0x00000001 },
+ { 0x419a3c, 1, 0x04, 0x0000181e },
+ { 0x419ac4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_mpc_0[] = {
+ { 0x419c00, 1, 0x04, 0x0000009a },
+ { 0x419c04, 1, 0x04, 0x80000bd6 },
+ { 0x419c08, 1, 0x04, 0x00000002 },
+ { 0x419c20, 1, 0x04, 0x00000000 },
+ { 0x419c24, 1, 0x04, 0x00084210 },
+ { 0x419c28, 1, 0x04, 0x3efbefbe },
+ { 0x419c2c, 1, 0x04, 0x00000000 },
+ { 0x419c34, 1, 0x04, 0x71ff1ff3 },
+ { 0x419c3c, 1, 0x04, 0x00001919 },
+ { 0x419c50, 1, 0x04, 0x00000005 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_l1c_0[] = {
+ { 0x419c84, 1, 0x04, 0x0000003e },
+ { 0x419c90, 1, 0x04, 0x0000000a },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_sm_0[] = {
+ { 0x419e04, 3, 0x04, 0x00000000 },
+ { 0x419e10, 1, 0x04, 0x00001c02 },
+ { 0x419e44, 1, 0x04, 0x00d3eff2 },
+ { 0x419e48, 1, 0x04, 0x00000000 },
+ { 0x419e4c, 1, 0x04, 0x0000007f },
+ { 0x419e50, 1, 0x04, 0x00000000 },
+ { 0x419e58, 6, 0x04, 0x00000000 },
+ { 0x419e74, 10, 0x04, 0x00000000 },
+ { 0x419eac, 1, 0x04, 0x0001cf8b },
+ { 0x419eb0, 1, 0x04, 0x00030300 },
+ { 0x419eb8, 1, 0x04, 0x40000000 },
+ { 0x419ef0, 24, 0x04, 0x00000000 },
+ { 0x419f68, 2, 0x04, 0x00000000 },
+ { 0x419f70, 1, 0x04, 0x00000020 },
+ { 0x419f78, 1, 0x04, 0x00010beb },
+ { 0x419f7c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_tpc[] = {
+ { gm204_grctx_init_pe_0 },
+ { gm204_grctx_init_tex_0 },
+ { gm204_grctx_init_mpc_0 },
+ { gm204_grctx_init_l1c_0 },
+ { gm204_grctx_init_sm_0 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_pes_0[] = {
+ { 0x41be24, 1, 0x04, 0x0000000e },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_cbm_0[] = {
+ { 0x41bec0, 1, 0x04, 0x00000000 },
+ { 0x41bec4, 1, 0x04, 0x01030000 },
+ { 0x41bee4, 1, 0x04, 0x00000000 },
+ { 0x41bef0, 1, 0x04, 0x000003ff },
+ { 0x41bef4, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_ppc[] = {
+ { gm204_grctx_init_pes_0 },
+ { gm204_grctx_init_cbm_0 },
+ { gm107_grctx_init_wwdx_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
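+/* Assign each TPC a sequential global ID.  The loop iterates TPC-index
+ * first, so consecutive IDs are spread round-robin across the GPCs.
+ */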
+static void
+gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+{
+ int gpc, tpc, id;
+
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+ id++;
+ }
+ }
+ }
+}
+
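+/* Mirror the number of active FBPs into the ZROP/CROP units. */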
+static void
+gm204_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+{
+ const u32 fbp_count = nv_rd32(priv, 0x12006c);
+ nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
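+/* Build the TPC distribution tables: 0x405b60 maps each global TPC ID
+ * to its (gpc,tpc) pair, 0x405ba0 holds the reverse per-GPC mapping.
+ */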
+static void
+gm204_grctx_generate_405b60(struct gf100_gr_priv *priv)
+{
+ const u32 dist_nr = DIV_ROUND_UP(priv->tpc_total, 4);
+ u32 dist[TPC_MAX] = {};
+ u32 gpcs[GPC_MAX] = {};
+ u8 tpcnr[GPC_MAX];
+ int tpc, gpc, i;
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ /* This won't result in the same distribution as the binary driver,
+ * where some of the GPCs have more TPCs than others, but it will do
+ * for the moment.  The code for earlier GPUs has this issue too.
+ */
+ for (gpc = -1, i = 0; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
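+ /* one byte per TPC: GPC index in the high nibble, TPC index in
+ * the low; four entries packed per 0x405b60 register
+ */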
+ dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+ gpcs[gpc] |= i << (tpc * 8);
+ }
+
+ for (i = 0; i < dist_nr; i++)
+ nv_wr32(priv, 0x405b60 + (i * 4), dist[i]);
+ for (i = 0; i < priv->gpc_nr; i++)
+ nv_wr32(priv, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
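+/* Generate the initial context image: replay the mmio lists, set up the
+ * bundle/pagepool/attrib buffers, then replay the icmd/mthd state lists.
+ */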
+void
+gm204_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+{
+ struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ u32 tmp;
+ int i;
+
+ gf100_gr_mmio(priv, oclass->hub);
+ gf100_gr_mmio(priv, oclass->gpc);
+ gf100_gr_mmio(priv, oclass->zcull);
+ gf100_gr_mmio(priv, oclass->tpc);
+ gf100_gr_mmio(priv, oclass->ppc);
+
+ nv_wr32(priv, 0x404154, 0x00000000);
+
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
+ oclass->unkn(priv);
+
+ gm204_grctx_generate_tpcid(priv);
+ gf100_grctx_generate_r406028(priv);
+ gk104_grctx_generate_r418bb8(priv);
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+ nv_wr32(priv, 0x406500, 0x00000000);
+
+ nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+
+ gm204_grctx_generate_rop_active_fbps(priv);
+
+ for (tmp = 0, i = 0; i < priv->gpc_nr; i++)
+ tmp |= ((1 << priv->tpc_nr[i]) - 1) << (i * 4);
+ nv_wr32(priv, 0x4041c4, tmp);
+
+ gm204_grctx_generate_405b60(priv);
+
+ gf100_gr_icmd(priv, oclass->icmd);
+ nv_wr32(priv, 0x404154, 0x00000800);
+ gf100_gr_mthd(priv, oclass->mthd);
+
+ nv_mask(priv, 0x418e94, 0xffffffff, 0xc4230000);
+ nv_mask(priv, 0x418e4c, 0xffffffff, 0x70000000);
+}
+
+struct nvkm_oclass *
+gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0x24),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
+ },
+ .main = gm204_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gm204_grctx_pack_hub,
+ .gpc = gm204_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gm204_grctx_pack_tpc,
+ .ppc = gm204_grctx_pack_ppc,
+ .icmd = gm204_grctx_pack_icmd,
+ .mthd = gm204_grctx_pack_mthd,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x780,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0x600,
+ .attrib_nr = 0x400,
+ .alpha_nr_max = 0x1800,
+ .alpha_nr = 0x1000,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
new file mode 100644
index 000000000000..91ec41617943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
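+/* Identical to the gm204 table except for the value of 0x418728. */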
+static const struct gf100_gr_init
+gm206_grctx_init_gpc_unk_1[] = {
+ { 0x418600, 1, 0x04, 0x0000007f },
+ { 0x418684, 1, 0x04, 0x0000001f },
+ { 0x418700, 1, 0x04, 0x00000002 },
+ { 0x418704, 1, 0x04, 0x00000080 },
+ { 0x418708, 1, 0x04, 0x40000000 },
+ { 0x41870c, 2, 0x04, 0x00000000 },
+ { 0x418728, 1, 0x04, 0x00300020 },
+ {}
+};
+
+static const struct gf100_gr_pack
+gm206_grctx_pack_gpc[] = {
+ { gm107_grctx_init_gpc_unk_0 },
+ { gm204_grctx_init_prop_0 },
+ { gm206_grctx_init_gpc_unk_1 },
+ { gm204_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gk208_grctx_init_crstr_0 },
+ { gm204_grctx_init_gpm_0 },
+ { gm204_grctx_init_gpc_unk_2 },
+ { gf100_grctx_init_gcc_0 },
+ {}
+};
+
+struct nvkm_oclass *
+gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0x26),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
+ },
+ .main = gm204_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gm204_grctx_pack_hub,
+ .gpc = gm206_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gm204_grctx_pack_tpc,
+ .ppc = gm204_grctx_pack_ppc,
+ .icmd = gm204_grctx_pack_icmd,
+ .mthd = gm204_grctx_pack_mthd,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x780,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0x600,
+ .attrib_nr = 0x400,
+ .alpha_nr_max = 0x1800,
+ .alpha_nr = 0x1000,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index eaed1599b90f..194afe910d21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -52,6 +52,12 @@ mmio_list_base:
#endif
#ifdef INCLUDE_CODE
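+// write "reg" to GPC-relative mmio address "addr"; setting the ENABLE
+// bit routes the access through the window offset programmed into
+// NV_PGRAPH_GPCX_GPCCS_MMIO_BASE during init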
+#define gpc_wr32(addr,reg) /*
+*/ mov b32 $r15 reg /*
+*/ imm32($r14, addr) /*
+*/ or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /*
+*/ call(nv_wr32)
+
// reports an exception to the host
//
// In: $r15 error code (see os.h)
@@ -64,6 +70,43 @@ error:
pop $r14
ret
+#if CHIPSET >= GM107
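+// spin until the TPC strand state machine reports idle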
+tpc_strand_wait:
+ push $r9
+ trace_set(T_STRTPC)
+ tpc_strand_busy:
+ nv_iord($r9, NV_PGRAPH_GPCX_GPCCS_TPC_STATUS, 0)
+ bra b32 $r9 0x0 ne #tpc_strand_busy
+ trace_clr(T_STRTPC)
+ pop $r9
+ ret
+
+#define tpc_strand_wait() call(tpc_strand_wait)
+#define tpc_strand_enable() /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_ENABLE /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ tpc_strand_wait()
+#define tpc_strand_disable() /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_DISABLE /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ tpc_strand_wait()
+#define tpc_strand_seek(p) /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_INDEX, $r15) /*
+*/ mov $r15 p /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_SELECT, $r15) /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SEEK /*
+*/ tpc_strand_wait()
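+// note: the SEEK command is left in $r15 here; it is only issued by the
+// STRAND_CMD write at the start of tpc_strand_info()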
+#define tpc_strand_info(m) /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ mov $r15 m /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_DATA, $r15) /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_GET_INFO /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ tpc_strand_wait()
+#endif
+
+
// GPC fuc initialisation, executed by triggering ucode start, will
// fall through to main loop after completion.
//
@@ -101,7 +144,7 @@ init:
// enable interrupts
bset $flags ie0
- // figure out which GPC we are, and how many TPCs we have
+ // how many TPCs do we have?
nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_UNITS, 0)
mov $r3 1
and $r2 0x1f
@@ -109,8 +152,12 @@ init:
sub b32 $r3 1
st b32 D[$r0 + #tpc_count] $r2
st b32 D[$r0 + #tpc_mask] $r3
+
+ // determine which GPC we are, and set up the (optional) mmio access offset
nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_MYINDEX, 0)
st b32 D[$r0 + #gpc_id] $r2
+ shl b32 $r2 15
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMIO_BASE, 0, $r2)
#if NV_PGRAPH_GPCX_UNK__SIZE > 0
// figure out which, and how many, UNKs are actually present
@@ -186,8 +233,56 @@ init:
// calculate size of strand context data
mov b32 $r15 $r2
call(strand_ctx_init)
+ add b32 $r2 $r15
add b32 $r3 $r15
+#if CHIPSET >= GM107
+ // calculate size of tpc strand context data
+ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_INDEX, $r15)
+ tpc_strand_enable();
+ tpc_strand_seek(0);
+ tpc_strand_info(-1);
+
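+ // for each TPC strand: point its save/load base at the current
+ // context offset, then grow the context size by the strand's word
+ // count rounded up to a 256-byte unit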
+ ld b32 $r4 D[$r0 + #tpc_count]
+ mov $r5 NV_PGRAPH_GPC0_TPC0
+ ld b32 $r6 D[$r0 + #gpc_id]
+ shl b32 $r6 15
+ add b32 $r5 $r6
+ tpc_strand_init_tpc_loop:
+ add b32 $r14 $r5 NV_TPC_STRAND_CNT
+ call(nv_rd32)
+ mov b32 $r6 $r15
+ clear b32 $r7
+ tpc_strand_init_idx_loop:
+ add b32 $r14 $r5 NV_TPC_STRAND_INDEX
+ mov b32 $r15 $r7
+ call(nv_wr32)
+ add b32 $r14 $r5 NV_TPC_STRAND_SAVE_SWBASE
+ shr b32 $r15 $r2 8
+ call(nv_wr32)
+ add b32 $r14 $r5 NV_TPC_STRAND_LOAD_SWBASE
+ shr b32 $r15 $r2 8
+ call(nv_wr32)
+ add b32 $r14 $r5 NV_TPC_STRAND_WORDS
+ call(nv_rd32)
+ shr b32 $r15 6
+ add b32 $r15 1
+ shl b32 $r15 8
+ add b32 $r2 $r15
+ add b32 $r3 $r15
+ add b32 $r7 1
+ sub b32 $r6 1
+ bra nz #tpc_strand_init_idx_loop
+ add b32 $r5 NV_PGRAPH_GPC0_TPC0__SIZE
+ sub b32 $r4 1
+ bra nz #tpc_strand_init_tpc_loop
+
+ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_INDEX, $r15)
+ tpc_strand_disable();
+#endif
+
// save context size, and tell HUB we're done
nv_iowr(NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(1), 0, $r3)
clear b32 $r2
@@ -306,6 +401,9 @@ ctx_redswitch:
ctx_xfer:
// set context base address
nv_iowr(NV_PGRAPH_GPCX_GPCCS_MEM_BASE, 0, $r15)
+#if CHIPSET >= GM107
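+ // mirror the context base address to the TPC strand units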
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_MEM_BASE, $r15)
+#endif
bra not $p1 #ctx_xfer_not_load
call(ctx_redswitch)
ctx_xfer_not_load:
@@ -318,6 +416,14 @@ ctx_xfer:
add b32 $r2 NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE
nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_CMD, 0x3f, $r2)
+#if CHIPSET >= GM107
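+ // kick the matching save/load command on the TPC strands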
+ tpc_strand_enable();
+ tpc_strand_seek(0);
+ xbit $r15 $flags $p1 // SAVE/LOAD
+ add b32 $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SAVE
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15)
+#endif
+
// mmio context
xbit $r10 $flags $p1 // direction
or $r10 2 // first
@@ -362,6 +468,9 @@ ctx_xfer:
// wait for strands to finish
call(strand_wait)
+#if CHIPSET >= GM107
+ tpc_strand_wait()
+#endif
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
@@ -370,6 +479,9 @@ ctx_xfer:
bra not $p2 #ctx_xfer_done
ctx_xfer_post:
call(strand_post)
+#if CHIPSET >= GM107
+ tpc_strand_disable()
+#endif
// mark completion in HUB's barrier
ctx_xfer_done:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index ea32f56c0a92..231f696d1e0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -310,7 +310,7 @@ uint32_t gf100_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe04e6,
+ 0x10fe04f8,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -329,157 +329,157 @@ uint32_t gf100_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x0235b600,
- 0xd30007f1,
- 0xd00103f0,
- 0x04bd0003,
- 0xb60825b6,
- 0x20b60635,
- 0x0130b601,
- 0xb60824b6,
- 0x2fb90834,
- 0xd321f502,
- 0x003fbb02,
- 0x010007f1,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0xf10235b6,
+ 0xf0d30007,
+ 0x03d00103,
+ 0xb604bd00,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x02d321f5,
+ 0xbb002fbb,
+ 0x07f1003f,
+ 0x03f00100,
+ 0x0003d002,
+ 0x24bd04bd,
+ 0xf11f29f0,
+ 0xf0080007,
+ 0x02d00203,
+/* 0x04bb: main */
+ 0xf404bd00,
+ 0x28f40031,
+ 0x1cd7f000,
+ 0xf43921f4,
+ 0xe4b0f401,
+ 0x1e18f404,
+ 0xf00181fe,
+ 0x20bd0627,
+ 0xb60412fd,
+ 0x1efd01e4,
+ 0x0018fe05,
+ 0x05b021f5,
+/* 0x04eb: main_not_ctx_xfer */
+ 0x94d30ef4,
+ 0xf5f010ef,
+ 0x7e21f501,
+ 0xc60ef403,
+/* 0x04f8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x1cd7f02c,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
+ 0xf00421f4,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x0546: ih_no_fifo */
+ 0x07f104bd,
+ 0x03f00100,
+ 0x000ad000,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x056a: hub_barrier_done */
+ 0xf7f001f8,
+ 0x040e9801,
+ 0xb904febb,
+ 0xe7f102ff,
+ 0xe3f09418,
+ 0x9d21f440,
+/* 0x0582: ctx_redswitch */
+ 0xf7f000f8,
+ 0x0007f120,
+ 0x0103f085,
+ 0xbd000fd0,
+ 0x08e7f004,
+/* 0x0594: ctx_redswitch_delay */
+ 0xf401e2b6,
+ 0xf5f1fd1b,
+ 0xf5f10800,
+ 0x07f10200,
+ 0x03f08500,
+ 0x000fd001,
+ 0x00f804bd,
+/* 0x05b0: ctx_xfer */
+ 0x810007f1,
0xd00203f0,
- 0x04bd0003,
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
-/* 0x04a9: main */
- 0x0031f404,
- 0xf00028f4,
- 0x21f41cd7,
- 0xf401f439,
- 0xf404e4b0,
- 0x81fe1e18,
- 0x0627f001,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x21f50018,
- 0x0ef4059e,
-/* 0x04d9: main_not_ctx_xfer */
- 0x10ef94d3,
- 0xf501f5f0,
- 0xf4037e21,
-/* 0x04e6: ih */
- 0x80f9c60e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0xf104bdf0,
- 0xf00200a7,
- 0xaacf00a3,
- 0x04abc400,
- 0xf02c0bf4,
- 0xe7f11cd7,
- 0xe3f01a00,
- 0x00eecf00,
- 0x1900f7f1,
- 0xcf00f3f0,
- 0x21f400ff,
- 0x01e7f004,
- 0x1d0007f1,
- 0xd00003f0,
- 0x04bd000e,
-/* 0x0534: ih_no_fifo */
- 0x010007f1,
- 0xd00003f0,
- 0x04bd000a,
- 0xe0fcf0fc,
- 0xb0fcd0fc,
- 0x90fca0fc,
- 0x88fe80fc,
- 0xf480fc00,
- 0x01f80032,
-/* 0x0558: hub_barrier_done */
- 0x9801f7f0,
- 0xfebb040e,
- 0x02ffb904,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f89d21,
-/* 0x0570: ctx_redswitch */
- 0xf120f7f0,
- 0xf0850007,
- 0x0fd00103,
- 0xf004bd00,
-/* 0x0582: ctx_redswitch_delay */
- 0xe2b608e7,
- 0xfd1bf401,
- 0x0800f5f1,
- 0x0200f5f1,
- 0x850007f1,
- 0xd00103f0,
0x04bd000f,
-/* 0x059e: ctx_xfer */
- 0x07f100f8,
- 0x03f08100,
- 0x000fd002,
- 0x11f404bd,
- 0x7021f507,
-/* 0x05b1: ctx_xfer_not_load */
- 0x6a21f505,
- 0xf124bd02,
- 0xf047fc07,
+ 0xf50711f4,
+/* 0x05c3: ctx_xfer_not_load */
+ 0xf5058221,
+ 0xbd026a21,
+ 0xfc07f124,
+ 0x0203f047,
+ 0xbd0002d0,
+ 0x012cf004,
+ 0xf10320b6,
+ 0xf04afc07,
0x02d00203,
0xf004bd00,
- 0x20b6012c,
- 0xfc07f103,
- 0x0203f04a,
- 0xbd0002d0,
- 0x01acf004,
- 0xf102a5f0,
- 0xf00000b7,
- 0x0c9850b3,
- 0x0fc4b604,
- 0x9800bcbb,
- 0x0d98000c,
- 0x00e7f001,
- 0x016f21f5,
- 0xf001acf0,
- 0xb7f104a5,
- 0xb3f04000,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x020d9801,
- 0xf1060f98,
- 0xf50800e7,
- 0xf5016f21,
- 0xf4025e21,
- 0x12f40601,
-/* 0x0629: ctx_xfer_post */
- 0x7f21f507,
-/* 0x062d: ctx_xfer_done */
- 0x5821f502,
- 0x0000f805,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xa5f001ac,
+ 0x00b7f102,
+ 0x50b3f000,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xacf0016f,
+ 0x04a5f001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x5e21f501,
+ 0x0601f402,
+/* 0x063b: ctx_xfer_post */
+ 0xf50712f4,
+/* 0x063f: ctx_xfer_done */
+ 0xf5027f21,
+ 0xf8056a21,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 9a36d9cbb8a5..64d07df4b8b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe0530,
+ 0x10fe0542,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -333,188 +333,188 @@ uint32_t gf117_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0421: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0436: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0442: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x020e9800,
- 0xf5030f98,
- 0x98015021,
- 0xeffd070e,
- 0x002ebb00,
- 0xb6003ebb,
- 0x07f10235,
- 0x03f0d300,
- 0x0003d001,
- 0x25b604bd,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb02d321,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x04f3: main */
- 0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x05e821f5,
-/* 0x0523: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0x7e21f501,
- 0xc60ef403,
-/* 0x0530: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x24d7f02c,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xf00421f4,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x057e: ih_no_fifo */
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0430: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0445: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0451: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
0x07f104bd,
- 0x03f00100,
- 0x000ad000,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x05a2: hub_barrier_done */
- 0xf7f001f8,
- 0x040e9801,
- 0xb904febb,
- 0xe7f102ff,
- 0xe3f09418,
- 0x9d21f440,
-/* 0x05ba: ctx_redswitch */
- 0xf7f000f8,
- 0x0007f120,
- 0x0103f085,
- 0xbd000fd0,
- 0x08e7f004,
-/* 0x05cc: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x07f10200,
- 0x03f08500,
- 0x000fd001,
- 0x00f804bd,
-/* 0x05e8: ctx_xfer */
- 0x810007f1,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x2fbb02d3,
+ 0x003fbb00,
+ 0x010007f1,
0xd00203f0,
- 0x04bd000f,
- 0xf50711f4,
-/* 0x05fb: ctx_xfer_not_load */
- 0xf505ba21,
- 0xbd026a21,
- 0xfc07f124,
- 0x0203f047,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x0007f11f,
+ 0x0203f008,
0xbd0002d0,
- 0x012cf004,
- 0xf10320b6,
- 0xf04afc07,
+/* 0x0505: main */
+ 0x0031f404,
+ 0xf00028f4,
+ 0x21f424d7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef405fa,
+/* 0x0535: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf4037e21,
+/* 0x0542: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f124d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0590: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x05b4: hub_barrier_done */
+ 0x9801f7f0,
+ 0xfebb040e,
+ 0x02ffb904,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f89d21,
+/* 0x05cc: ctx_redswitch */
+ 0xf120f7f0,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf004bd00,
+/* 0x05de: ctx_redswitch_delay */
+ 0xe2b608e7,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05fa: ctx_xfer */
+ 0x07f100f8,
+ 0x03f08100,
+ 0x000fd002,
+ 0x11f404bd,
+ 0xcc21f507,
+/* 0x060d: ctx_xfer_not_load */
+ 0x6a21f505,
+ 0xf124bd02,
+ 0xf047fc07,
0x02d00203,
0xf004bd00,
- 0xa5f001ac,
- 0x00b7f102,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf0016f,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf0016f,
- 0x04a5f001,
- 0x3000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6f21f502,
- 0x5e21f501,
- 0x0601f402,
-/* 0x0697: ctx_xfer_post */
- 0xf50712f4,
-/* 0x069b: ctx_xfer_done */
- 0xf5027f21,
- 0xf805a221,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x20b6012c,
+ 0xfc07f103,
+ 0x0203f04a,
+ 0xbd0002d0,
+ 0x01acf004,
+ 0xf102a5f0,
+ 0xf00000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf101acf0,
+ 0xf04000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x0800e7f1,
+ 0x016f21f5,
+ 0xf001acf0,
+ 0xb7f104a5,
+ 0xb3f03000,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x030d9802,
+ 0xf1080f98,
+ 0xf50200e7,
+ 0xf5016f21,
+ 0xf4025e21,
+ 0x12f40601,
+/* 0x06a9: ctx_xfer_post */
+ 0x7f21f507,
+/* 0x06ad: ctx_xfer_done */
+ 0xb421f502,
+ 0x0000f805,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 49020fff4317..2f596433c222 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe0530,
+ 0x10fe0542,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -333,188 +333,188 @@ uint32_t gk104_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0421: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0436: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0442: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x020e9800,
- 0xf5030f98,
- 0x98015021,
- 0xeffd070e,
- 0x002ebb00,
- 0xb6003ebb,
- 0x07f10235,
- 0x03f0d300,
- 0x0003d001,
- 0x25b604bd,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb02d321,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x04f3: main */
- 0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x05e821f5,
-/* 0x0523: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0x7e21f501,
- 0xc60ef403,
-/* 0x0530: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x24d7f02c,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xf00421f4,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x057e: ih_no_fifo */
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0430: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0445: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0451: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
0x07f104bd,
- 0x03f00100,
- 0x000ad000,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x05a2: hub_barrier_done */
- 0xf7f001f8,
- 0x040e9801,
- 0xb904febb,
- 0xe7f102ff,
- 0xe3f09418,
- 0x9d21f440,
-/* 0x05ba: ctx_redswitch */
- 0xf7f000f8,
- 0x0007f120,
- 0x0103f085,
- 0xbd000fd0,
- 0x08e7f004,
-/* 0x05cc: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x07f10200,
- 0x03f08500,
- 0x000fd001,
- 0x00f804bd,
-/* 0x05e8: ctx_xfer */
- 0x810007f1,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x2fbb02d3,
+ 0x003fbb00,
+ 0x010007f1,
0xd00203f0,
- 0x04bd000f,
- 0xf50711f4,
-/* 0x05fb: ctx_xfer_not_load */
- 0xf505ba21,
- 0xbd026a21,
- 0xfc07f124,
- 0x0203f047,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x0007f11f,
+ 0x0203f008,
0xbd0002d0,
- 0x012cf004,
- 0xf10320b6,
- 0xf04afc07,
+/* 0x0505: main */
+ 0x0031f404,
+ 0xf00028f4,
+ 0x21f424d7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef405fa,
+/* 0x0535: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf4037e21,
+/* 0x0542: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f124d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0590: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x05b4: hub_barrier_done */
+ 0x9801f7f0,
+ 0xfebb040e,
+ 0x02ffb904,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f89d21,
+/* 0x05cc: ctx_redswitch */
+ 0xf120f7f0,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf004bd00,
+/* 0x05de: ctx_redswitch_delay */
+ 0xe2b608e7,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05fa: ctx_xfer */
+ 0x07f100f8,
+ 0x03f08100,
+ 0x000fd002,
+ 0x11f404bd,
+ 0xcc21f507,
+/* 0x060d: ctx_xfer_not_load */
+ 0x6a21f505,
+ 0xf124bd02,
+ 0xf047fc07,
0x02d00203,
0xf004bd00,
- 0xa5f001ac,
- 0x00b7f102,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf0016f,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf0016f,
- 0x04a5f001,
- 0x3000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6f21f502,
- 0x5e21f501,
- 0x0601f402,
-/* 0x0697: ctx_xfer_post */
- 0xf50712f4,
-/* 0x069b: ctx_xfer_done */
- 0xf5027f21,
- 0xf805a221,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x20b6012c,
+ 0xfc07f103,
+ 0x0203f04a,
+ 0xbd0002d0,
+ 0x01acf004,
+ 0xf102a5f0,
+ 0xf00000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf101acf0,
+ 0xf04000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x0800e7f1,
+ 0x016f21f5,
+ 0xf001acf0,
+ 0xb7f104a5,
+ 0xb3f03000,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x030d9802,
+ 0xf1080f98,
+ 0xf50200e7,
+ 0xf5016f21,
+ 0xf4025e21,
+ 0x12f40601,
+/* 0x06a9: ctx_xfer_post */
+ 0x7f21f507,
+/* 0x06ad: ctx_xfer_done */
+ 0xb421f502,
+ 0x0000f805,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index c95b07e3bce5..ee8e54db8fc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe0530,
+ 0x10fe0542,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -333,188 +333,188 @@ uint32_t gk110_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0421: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0436: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40226b0,
-/* 0x0442: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x020e9800,
- 0xf5030f98,
- 0x98015021,
- 0xeffd070e,
- 0x002ebb00,
- 0xb6003ebb,
- 0x07f10235,
- 0x03f0d300,
- 0x0003d001,
- 0x25b604bd,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb02d321,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0300007,
- 0x02d00203,
-/* 0x04f3: main */
- 0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x05e821f5,
-/* 0x0523: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0x7e21f501,
- 0xc60ef403,
-/* 0x0530: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x24d7f02c,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xf00421f4,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x057e: ih_no_fifo */
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0430: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0445: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40226,
+/* 0x0451: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
0x07f104bd,
- 0x03f00100,
- 0x000ad000,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x05a2: hub_barrier_done */
- 0xf7f001f8,
- 0x040e9801,
- 0xb904febb,
- 0xe7f102ff,
- 0xe3f09418,
- 0x9d21f440,
-/* 0x05ba: ctx_redswitch */
- 0xf7f000f8,
- 0x0007f120,
- 0x0103f085,
- 0xbd000fd0,
- 0x08e7f004,
-/* 0x05cc: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x07f10200,
- 0x03f08500,
- 0x000fd001,
- 0x00f804bd,
-/* 0x05e8: ctx_xfer */
- 0x810007f1,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x2fbb02d3,
+ 0x003fbb00,
+ 0x010007f1,
0xd00203f0,
- 0x04bd000f,
- 0xf50711f4,
-/* 0x05fb: ctx_xfer_not_load */
- 0xf505ba21,
- 0xbd026a21,
- 0xfc07f124,
- 0x0203f047,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x0007f11f,
+ 0x0203f030,
0xbd0002d0,
- 0x012cf004,
- 0xf10320b6,
- 0xf04afc07,
+/* 0x0505: main */
+ 0x0031f404,
+ 0xf00028f4,
+ 0x21f424d7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef405fa,
+/* 0x0535: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf4037e21,
+/* 0x0542: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f124d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0590: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x05b4: hub_barrier_done */
+ 0x9801f7f0,
+ 0xfebb040e,
+ 0x02ffb904,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f89d21,
+/* 0x05cc: ctx_redswitch */
+ 0xf120f7f0,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf004bd00,
+/* 0x05de: ctx_redswitch_delay */
+ 0xe2b608e7,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05fa: ctx_xfer */
+ 0x07f100f8,
+ 0x03f08100,
+ 0x000fd002,
+ 0x11f404bd,
+ 0xcc21f507,
+/* 0x060d: ctx_xfer_not_load */
+ 0x6a21f505,
+ 0xf124bd02,
+ 0xf047fc07,
0x02d00203,
0xf004bd00,
- 0xa5f001ac,
- 0x00b7f102,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf0016f,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf0016f,
- 0x04a5f001,
- 0x3000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6f21f502,
- 0x5e21f501,
- 0x0601f402,
-/* 0x0697: ctx_xfer_post */
- 0xf50712f4,
-/* 0x069b: ctx_xfer_done */
- 0xf5027f21,
- 0xf805a221,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x20b6012c,
+ 0xfc07f103,
+ 0x0203f04a,
+ 0xbd0002d0,
+ 0x01acf004,
+ 0xf102a5f0,
+ 0xf00000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf101acf0,
+ 0xf04000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x0800e7f1,
+ 0x016f21f5,
+ 0xf001acf0,
+ 0xb7f104a5,
+ 0xb3f03000,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x030d9802,
+ 0xf1080f98,
+ 0xf50200e7,
+ 0xf5016f21,
+ 0xf4025e21,
+ 0x12f40601,
+/* 0x06a9: ctx_xfer_post */
+ 0x7f21f507,
+/* 0x06ad: ctx_xfer_done */
+ 0xb421f502,
+ 0x0000f805,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 7e1c28ee7591..fbcc342f896f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = {
0x02020014,
0xf6120040,
0x04bd0002,
- 0xfe047241,
+ 0xfe048141,
0x00400010,
0x0000f607,
0x040204bd,
@@ -291,20 +291,23 @@ uint32_t gk208_grgpc_code[] = {
0x820603b5,
0xcf018600,
0x02b50022,
+ 0x0f24b604,
+ 0x01c90080,
+ 0xbd0002f6,
0x0c308e04,
0xbd24bd50,
-/* 0x0377: init_unk_loop */
+/* 0x0383: init_unk_loop */
0x7e44bd34,
0xb0000065,
0x0bf400f6,
0xbb010f0e,
0x4ffd04f2,
0x0130b605,
-/* 0x038c: init_unk_next */
+/* 0x0398: init_unk_next */
0xb60120b6,
0x26b004e0,
0xe21bf401,
-/* 0x0398: init_unk_done */
+/* 0x03a4: init_unk_done */
0xb50703b5,
0x00820804,
0x22cf0201,
@@ -338,121 +341,118 @@ uint32_t gk208_grgpc_code[] = {
0xb60824b6,
0x2fb20834,
0x0002687e,
- 0x80003fbb,
- 0xf6020100,
- 0x04bd0003,
- 0x29f024bd,
- 0x3000801f,
- 0x0002f602,
-/* 0x0436: main */
- 0x31f404bd,
- 0x0028f400,
- 0x377e240d,
- 0x01f40000,
- 0x04e4b0f4,
- 0xfe1d18f4,
- 0x06020181,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x097e0018,
- 0x0ef40005,
-/* 0x0465: main_not_ctx_xfer */
- 0x10ef94d4,
- 0x7e01f5f0,
- 0xf40002f8,
-/* 0x0472: ih */
- 0x80f9c70e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0x4a04bdf0,
- 0xaacf0200,
- 0x04abc400,
- 0x0d1f0bf4,
- 0x1a004e24,
- 0x4f00eecf,
- 0xffcf1900,
- 0x00047e00,
- 0x40010e00,
- 0x0ef61d00,
-/* 0x04af: ih_no_fifo */
- 0x4004bd00,
- 0x0af60100,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04cf: hub_barrier_done */
- 0x0f01f800,
- 0x040e9801,
- 0xb204febb,
- 0x94188eff,
- 0x008f7e40,
-/* 0x04e3: ctx_redswitch */
- 0x0f00f800,
- 0x85008020,
+ 0xbb002fbb,
+ 0x0080003f,
+ 0x03f60201,
+ 0xbd04bd00,
+ 0x1f29f024,
+ 0x02300080,
+ 0xbd0002f6,
+/* 0x0445: main */
+ 0x0031f404,
+ 0x0d0028f4,
+ 0x00377e24,
+ 0xf401f400,
+ 0xf404e4b0,
+ 0x81fe1d18,
+ 0xbd060201,
+ 0x0412fd20,
+ 0xfd01e4b6,
+ 0x18fe051e,
+ 0x05187e00,
+ 0xd40ef400,
+/* 0x0474: main_not_ctx_xfer */
+ 0xf010ef94,
+ 0xf87e01f5,
+ 0x0ef40002,
+/* 0x0481: ih */
+ 0xfe80f9c7,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0x004a04bd,
+ 0x00aacf02,
+ 0xf404abc4,
+ 0x240d1f0b,
+ 0xcf1a004e,
+ 0x004f00ee,
+ 0x00ffcf19,
+ 0x0000047e,
+ 0x0040010e,
+ 0x000ef61d,
+/* 0x04be: ih_no_fifo */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x04de: hub_barrier_done */
+ 0x010f01f8,
+ 0xbb040e98,
+ 0xffb204fe,
+ 0x4094188e,
+ 0x00008f7e,
+/* 0x04f2: ctx_redswitch */
+ 0x200f00f8,
+ 0x01850080,
+ 0xbd000ff6,
+/* 0x04ff: ctx_redswitch_delay */
+ 0xb6080e04,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x85008002,
0x000ff601,
- 0x080e04bd,
-/* 0x04f0: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x00800200,
- 0x0ff60185,
- 0xf804bd00,
-/* 0x0509: ctx_xfer */
- 0x81008000,
- 0x000ff602,
- 0x11f404bd,
- 0x04e37e07,
-/* 0x0519: ctx_xfer_not_load */
- 0x02167e00,
- 0x8024bd00,
- 0xf60247fc,
- 0x04bd0002,
- 0xb6012cf0,
- 0xfc800320,
- 0x02f6024a,
+ 0x00f804bd,
+/* 0x0518: ctx_xfer */
+ 0x02810080,
+ 0xbd000ff6,
+ 0x0711f404,
+ 0x0004f27e,
+/* 0x0528: ctx_xfer_not_load */
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
0xf004bd00,
- 0xa5f001ac,
- 0x00008b02,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x010d9800,
- 0x3d7e000e,
- 0xacf00001,
- 0x40008b01,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x020d9801,
- 0x4e060f98,
- 0x3d7e0800,
- 0xacf00001,
- 0x04a5f001,
- 0x5030008b,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x5000008b,
0xb6040c98,
0xbcbb0fc4,
- 0x020c9800,
- 0x98030d98,
- 0x004e080f,
- 0x013d7e02,
- 0x020a7e00,
- 0x0601f400,
-/* 0x05a3: ctx_xfer_post */
- 0x7e0712f4,
-/* 0x05a7: ctx_xfer_done */
- 0x7e000227,
- 0xf80004cf,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x01acf000,
+ 0x5040008b,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0x004e060f,
+ 0x013d7e08,
+ 0x01acf000,
+ 0x8b04a5f0,
+ 0x98503000,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98020c98,
+ 0x0f98030d,
+ 0x02004e08,
+ 0x00013d7e,
+ 0x00020a7e,
+ 0xf40601f4,
+/* 0x05b2: ctx_xfer_post */
+ 0x277e0712,
+/* 0x05b6: ctx_xfer_done */
+ 0xde7e0002,
+ 0x00f80004,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5 b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
index e730603891d7..47802c7ecca1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
@@ -24,7 +24,7 @@
#define NV_PGRAPH_GPCX_UNK__SIZE 0x00000002
-#define CHIPSET GK208
+#define CHIPSET GM107
#include "macros.fuc"
.section #gm107_grgpc_data
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 6d53b67dd3c4..51f5c3c6e966 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -41,7 +41,7 @@ uint32_t gm107_grgpc_data[] = {
};
uint32_t gm107_grgpc_code[] = {
- 0x03140ef5,
+ 0x03410ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
@@ -268,187 +268,319 @@ uint32_t gm107_grgpc_code[] = {
0x409c1c8e,
0x00008f7e,
0x00f8e0fc,
-/* 0x0314: init */
- 0x004104bd,
- 0x0011cf42,
- 0x010911e7,
- 0xfe0814b6,
- 0x02020014,
- 0xf6120040,
- 0x04bd0002,
- 0xfe047241,
- 0x00400010,
- 0x0000f607,
- 0x040204bd,
- 0xf6040040,
- 0x04bd0002,
- 0x821031f4,
- 0xcf018200,
- 0x01030022,
- 0xbb1f24f0,
- 0x32b60432,
- 0x0502b501,
- 0x820603b5,
- 0xcf018600,
- 0x02b50022,
- 0x0c308e04,
- 0xbd24bd50,
-/* 0x0377: init_unk_loop */
- 0x7e44bd34,
- 0xb0000065,
- 0x0bf400f6,
- 0xbb010f0e,
- 0x4ffd04f2,
- 0x0130b605,
-/* 0x038c: init_unk_next */
- 0xb60120b6,
- 0x26b004e0,
- 0xe21bf402,
-/* 0x0398: init_unk_done */
- 0xb50703b5,
- 0x00820804,
- 0x22cf0201,
- 0x9534bd00,
- 0x00800825,
- 0x05f601c0,
- 0x8004bd00,
- 0xf601c100,
+/* 0x0314: tpc_strand_wait */
+ 0x94bd90f9,
+ 0x800a99f0,
+ 0xf6023700,
+ 0x04bd0009,
+/* 0x0324: tpc_strand_busy */
+ 0x033f0089,
+ 0xb30099cf,
+ 0xbdf90094,
+ 0x0a99f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf890fc04,
+/* 0x0341: init */
+ 0x4104bd00,
+ 0x11cf4200,
+ 0x0911e700,
+ 0x0814b601,
+ 0x020014fe,
+ 0x12004002,
+ 0xbd0002f6,
+ 0x05b04104,
+ 0x400010fe,
+ 0x00f60700,
+ 0x0204bd00,
+ 0x04004004,
+ 0xbd0002f6,
+ 0x1031f404,
+ 0x01820082,
+ 0x030022cf,
+ 0x1f24f001,
+ 0xb60432bb,
+ 0x02b50132,
+ 0x0603b505,
+ 0x01860082,
+ 0xb50022cf,
+ 0x24b60402,
+ 0xc900800f,
+ 0x0002f601,
+ 0x308e04bd,
+ 0x24bd500c,
+ 0x44bd34bd,
+/* 0x03b0: init_unk_loop */
+ 0x0000657e,
+ 0xf400f6b0,
+ 0x010f0e0b,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x03c5: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40226,
+/* 0x03d1: init_unk_done */
+ 0x0703b5e2,
+ 0x820804b5,
+ 0xcf020100,
+ 0x34bd0022,
+ 0x80082595,
+ 0xf601c000,
0x04bd0005,
- 0x98000e98,
- 0x207e010f,
- 0x2fbb0001,
+ 0x01c10080,
+ 0xbd0005f6,
+ 0x000e9804,
+ 0x7e010f98,
+ 0xbb000120,
+ 0x3fbb002f,
+ 0x010e9800,
+ 0x7e020f98,
+ 0x98000120,
+ 0xeffd050e,
+ 0x002ebb00,
+ 0x98003ebb,
+ 0x0f98020e,
+ 0x01207e03,
+ 0x070e9800,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x0235b600,
+ 0x01d30080,
+ 0xbd0003f6,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb20834b6,
+ 0x02687e2f,
+ 0x002fbb00,
+ 0x0f003fbb,
+ 0x8effb23f,
+ 0xf0501d60,
+ 0x8f7e01e5,
+ 0x0c0f0000,
+ 0xa88effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x03147e00,
+ 0xb23f0f00,
+ 0x1d608eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0xffb2000f,
+ 0x501d9c8e,
+ 0x7e01e5f0,
+ 0x0f00008f,
+ 0x03147e01,
+ 0x8effb200,
+ 0xf0501da8,
+ 0x8f7e01e5,
+ 0xff0f0000,
+ 0x988effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0xb2020f00,
+ 0x1da88eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0x0003147e,
+ 0x85050498,
+ 0x98504000,
+ 0x64b60406,
+ 0x0056bb0f,
+/* 0x04e0: tpc_strand_init_tpc_loop */
+ 0x05705eb8,
+ 0x00657e00,
+ 0xbdf6b200,
+/* 0x04ed: tpc_strand_init_idx_loop */
+ 0x605eb874,
+ 0x7fb20005,
+ 0x00008f7e,
+ 0x05885eb8,
+ 0x082f9500,
+ 0x00008f7e,
+ 0x058c5eb8,
+ 0x082f9500,
+ 0x00008f7e,
+ 0x05905eb8,
+ 0x00657e00,
+ 0x06f5b600,
+ 0xb601f0b6,
+ 0x2fbb08f4,
0x003fbb00,
- 0x98010e98,
- 0x207e020f,
- 0x0e980001,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x0001207e,
- 0xfd070e98,
- 0x2ebb00ef,
- 0x003ebb00,
- 0x800235b6,
- 0xf601d300,
- 0x04bd0003,
- 0xb60825b6,
- 0x20b60635,
- 0x0130b601,
- 0xb60824b6,
- 0x2fb20834,
- 0x0002687e,
- 0x80003fbb,
- 0xf6020100,
- 0x04bd0003,
- 0x29f024bd,
- 0x3000801f,
- 0x0002f602,
-/* 0x0436: main */
- 0x31f404bd,
- 0x0028f400,
- 0x377e240d,
- 0x01f40000,
- 0x04e4b0f4,
- 0xfe1d18f4,
- 0x06020181,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x097e0018,
- 0x0ef40005,
-/* 0x0465: main_not_ctx_xfer */
- 0x10ef94d4,
- 0x7e01f5f0,
- 0xf40002f8,
-/* 0x0472: ih */
- 0x80f9c70e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0x4a04bdf0,
- 0xaacf0200,
- 0x04abc400,
- 0x0d1f0bf4,
- 0x1a004e24,
- 0x4f00eecf,
- 0xffcf1900,
- 0x00047e00,
- 0x40010e00,
- 0x0ef61d00,
-/* 0x04af: ih_no_fifo */
- 0x4004bd00,
- 0x0af60100,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04cf: hub_barrier_done */
- 0x0f01f800,
- 0x040e9801,
- 0xb204febb,
- 0x94188eff,
- 0x008f7e40,
-/* 0x04e3: ctx_redswitch */
- 0x0f00f800,
- 0x85008020,
- 0x000ff601,
- 0x080e04bd,
-/* 0x04f0: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x00800200,
- 0x0ff60185,
- 0xf804bd00,
-/* 0x0509: ctx_xfer */
- 0x81008000,
- 0x000ff602,
- 0x11f404bd,
- 0x04e37e07,
-/* 0x0519: ctx_xfer_not_load */
- 0x02167e00,
- 0x8024bd00,
- 0xf60247fc,
+ 0xb60170b6,
+ 0x1bf40162,
+ 0x0050b7bf,
+ 0x0142b608,
+ 0x0fa81bf4,
+ 0x8effb23f,
+ 0xf0501d60,
+ 0x8f7e01e5,
+ 0x0d0f0000,
+ 0xa88effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x03147e00,
+ 0x01008000,
+ 0x0003f602,
+ 0x24bd04bd,
+ 0x801f29f0,
+ 0xf6023000,
0x04bd0002,
- 0xb6012cf0,
- 0xfc800320,
- 0x02f6024a,
+/* 0x0574: main */
+ 0xf40031f4,
+ 0x240d0028,
+ 0x0000377e,
+ 0xb0f401f4,
+ 0x18f404e4,
+ 0x0181fe1d,
+ 0x20bd0602,
+ 0xb60412fd,
+ 0x1efd01e4,
+ 0x0018fe05,
+ 0x0006477e,
+/* 0x05a3: main_not_ctx_xfer */
+ 0x94d40ef4,
+ 0xf5f010ef,
+ 0x02f87e01,
+ 0xc70ef400,
+/* 0x05b0: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e240d1f,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0x0e000004,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x05ed: ih_no_fifo */
+ 0x01004004,
+ 0xbd000af6,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x060d: hub_barrier_done */
+ 0x98010f01,
+ 0xfebb040e,
+ 0x8effb204,
+ 0x7e409418,
+ 0xf800008f,
+/* 0x0621: ctx_redswitch */
+ 0x80200f00,
+ 0xf6018500,
+ 0x04bd000f,
+/* 0x062e: ctx_redswitch_delay */
+ 0xe2b6080e,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x01850080,
+ 0xbd000ff6,
+/* 0x0647: ctx_xfer */
+ 0x8000f804,
+ 0xf6028100,
+ 0x04bd000f,
+ 0xc48effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x0711f400,
+ 0x0006217e,
+/* 0x0664: ctx_xfer_not_load */
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
0xf004bd00,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0x0c0f04bd,
+ 0xa88effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x03147e00,
+ 0xb23f0f00,
+ 0x1d608eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0xffb2000f,
+ 0x501d9c8e,
+ 0x7e01e5f0,
+ 0x0f00008f,
+ 0x03147e01,
+ 0x01fcf000,
+ 0xb203f0b6,
+ 0x1da88eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0xf001acf0,
+ 0x008b02a5,
+ 0x0c985000,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x7e000e01,
+ 0xf000013d,
+ 0x008b01ac,
+ 0x0c985040,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x7e08004e,
+ 0xf000013d,
0xa5f001ac,
- 0x00008b02,
+ 0x30008b04,
0x040c9850,
0xbb0fc4b6,
0x0c9800bc,
- 0x010d9800,
- 0x3d7e000e,
- 0xacf00001,
- 0x40008b01,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x020d9801,
- 0x4e060f98,
- 0x3d7e0800,
- 0xacf00001,
- 0x04a5f001,
- 0x5030008b,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x020c9800,
- 0x98030d98,
- 0x004e080f,
- 0x013d7e02,
- 0x020a7e00,
- 0x0601f400,
-/* 0x05a3: ctx_xfer_post */
- 0x7e0712f4,
-/* 0x05a7: ctx_xfer_done */
- 0x7e000227,
- 0xf80004cf,
+ 0x030d9802,
+ 0x4e080f98,
+ 0x3d7e0200,
+ 0x0a7e0001,
+ 0x147e0002,
+ 0x01f40003,
+ 0x1a12f406,
+/* 0x073c: ctx_xfer_post */
+ 0x0002277e,
+ 0xffb20d0f,
+ 0x501da88e,
+ 0x7e01e5f0,
+ 0x7e00008f,
+/* 0x0753: ctx_xfer_done */
+ 0x7e000314,
+ 0xf800060d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
index 2a0b0f844299..fa618066441a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
@@ -29,6 +29,7 @@
#define GK100 0xe0
#define GK110 0xf0
#define GK208 0x108
+#define GM107 0x117
#define NV_PGRAPH_TRAPPED_ADDR 0x400704
#define NV_PGRAPH_TRAPPED_DATA_LO 0x400708
@@ -79,7 +80,9 @@
#define NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE 0x409718
#define NV_PGRAPH_FECS_MMCTX_MULTI_MASK 0x40971c
#define NV_PGRAPH_FECS_MMCTX_QUEUE 0x409720
+#define NV_PGRAPH_FECS_MMIO_BASE 0x409724
#define NV_PGRAPH_FECS_MMIO_CTRL 0x409728
+#define NV_PGRAPH_FECS_MMIO_CTRL_BASE_ENABLE 0x00000001
#define NV_PGRAPH_FECS_MMIO_RDVAL 0x40972c
#define NV_PGRAPH_FECS_MMIO_WRVAL 0x409730
#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c
@@ -147,6 +150,11 @@
#define NV_PGRAPH_GPCX_GPCCS_MYINDEX 0x41a618
#define NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE 0x41a700
#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE 0x41a704
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_BASE 0x41a724
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL 0x41a728
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE 0x00000001
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_RDVAL 0x41a72c
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_WRVAL 0x41a730
#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT 0x41a74c
#if CHIPSET < GK110
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800)
@@ -164,6 +172,29 @@
#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE 0x00000003
#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_LOAD 0x00000004
#define NV_PGRAPH_GPCX_GPCCS_MEM_BASE 0x41aa04
+#define NV_PGRAPH_GPCX_GPCCS_TPC_STATUS 0x41acfc
+
+#define NV_PGRAPH_GPC0_TPC0 0x504000
+#define NV_PGRAPH_GPC0_TPC0__SIZE 0x000800
+
+#define NV_PGRAPH_GPC0_TPCX_STRAND_INDEX 0x501d60
+#define NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL 0x0000003f
+#define NV_PGRAPH_GPC0_TPCX_STRAND_DATA 0x501d98
+#define NV_PGRAPH_GPC0_TPCX_STRAND_SELECT 0x501d9c
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD 0x501da8
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SEEK 0x00000001
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_GET_INFO 0x00000002
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SAVE 0x00000003
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_LOAD 0x00000004
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_ENABLE 0x0000000c
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_DISABLE 0x0000000d
+#define NV_PGRAPH_GPC0_TPCX_STRAND_MEM_BASE 0x501dc4
+
+#define NV_TPC_STRAND_INDEX 0x560
+#define NV_TPC_STRAND_CNT 0x570
+#define NV_TPC_STRAND_SAVE_SWBASE 0x588
+#define NV_TPC_STRAND_LOAD_SWBASE 0x58c
+#define NV_TPC_STRAND_WORDS 0x590
#define mmctx_data(r,c) .b32 (((c - 1) << 26) | r)
#define queue_init .skip 72 // (2 * 4) + ((8 * 4) * 2)
@@ -178,6 +209,7 @@
#define T_SAVE 7
#define T_LCHAN 8
#define T_LCTXH 9
+#define T_STRTPC 10
#if CHIPSET < GK208
#define imm32(reg,val) /*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 1dd482e9da77..5606c25e5d02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -236,7 +236,7 @@ static int
gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
void *pdata, u32 size)
{
- struct gf100_gr_priv *priv = (void *)nv_engine(object);
+ struct gf100_gr_priv *priv = (void *)object->engine;
if (size >= sizeof(u32)) {
u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
nv_wr32(priv, 0x419e44, data);
@@ -260,8 +260,8 @@ gf100_gr_90c0_omthds[] = {
struct nvkm_oclass
gf100_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -1097,12 +1097,26 @@ gf100_gr_intr(struct nvkm_subdev *subdev)
u32 subc = (addr & 0x00070000) >> 16;
u32 data = nv_rd32(priv, 0x400708);
u32 code = nv_rd32(priv, 0x400110);
- u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+ u32 class;
int chid;
+ if (nv_device(priv)->card_type < NV_E0 || subc < 4)
+ class = nv_rd32(priv, 0x404200 + (subc * 4));
+ else
+ class = 0x0000;
+
engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
+ if (stat & 0x00000001) {
+ /*
+ * notifier interrupt, only needed for cyclestats;
+ * can be safely ignored
+ */
+ nv_wr32(priv, 0x400100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
if (stat & 0x00000010) {
handle = nvkm_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index aeeca1be9cf0..8af1a89eda84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -124,10 +124,12 @@ void gf100_gr_dtor(struct nvkm_object *);
int gf100_gr_init(struct nvkm_object *);
void gf100_gr_zbc_init(struct gf100_gr_priv *);
-int gk104_gr_fini(struct nvkm_object *, bool);
+int gk104_gr_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
int gk104_gr_init(struct nvkm_object *);
-int gk110_gr_fini(struct nvkm_object *, bool);
+int gm204_gr_init(struct nvkm_object *);
extern struct nvkm_ofuncs gf100_fermi_ofuncs;
@@ -136,6 +138,7 @@ extern struct nvkm_omthds gf100_gr_9097_omthds[];
extern struct nvkm_omthds gf100_gr_90c0_omthds[];
extern struct nvkm_oclass gf110_gr_sclass[];
extern struct nvkm_oclass gk110_gr_sclass[];
+extern struct nvkm_oclass gm204_gr_sclass[];
struct gf100_gr_init {
u32 addr;
@@ -247,4 +250,17 @@ extern const struct gf100_gr_init gk110_gr_init_tex_0[];
extern const struct gf100_gr_init gk110_gr_init_sm_0[];
extern const struct gf100_gr_init gk208_gr_init_gpc_unk_0[];
+
+extern const struct gf100_gr_init gm107_gr_init_scc_0[];
+extern const struct gf100_gr_init gm107_gr_init_prop_0[];
+extern const struct gf100_gr_init gm107_gr_init_setup_1[];
+extern const struct gf100_gr_init gm107_gr_init_zcull_0[];
+extern const struct gf100_gr_init gm107_gr_init_gpc_unk_1[];
+extern const struct gf100_gr_init gm107_gr_init_tex_0[];
+extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
+extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
+extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
+void gm107_gr_init_bios(struct gf100_gr_priv *);
+
+extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 5362c8176e64..8df73421c78c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -32,8 +32,8 @@
static struct nvkm_oclass
gf108_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 88beb491b7b8..ef76e2dd1d31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -32,8 +32,8 @@
struct nvkm_oclass
gf110_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 489fdd94b885..46f7844eca70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -34,8 +34,8 @@
static struct nvkm_oclass
gk104_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa040, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -310,6 +310,17 @@ gk104_gr_init(struct nvkm_object *object)
return gf100_gr_init_ctxctl(priv);
}
+int
+gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(parent);
+ if (pmu)
+ pmu->pgob(pmu, false);
+ return gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
+}
+
#include "fuc/hubgk104.fuc3.h"
static struct gf100_gr_ucode
@@ -334,7 +345,7 @@ struct nvkm_oclass *
gk104_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xe4),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 78e03ab1608e..f4cd8e5546af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -34,8 +34,8 @@
struct nvkm_oclass
gk110_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa140, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -173,43 +173,6 @@ gk110_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-int
-gk110_gr_fini(struct nvkm_object *object, bool suspend)
-{
- struct gf100_gr_priv *priv = (void *)object;
- static const struct {
- u32 addr;
- u32 data;
- } magic[] = {
- { 0x020520, 0xfffffffc },
- { 0x020524, 0xfffffffe },
- { 0x020524, 0xfffffffc },
- { 0x020524, 0xfffffff8 },
- { 0x020524, 0xffffffe0 },
- { 0x020530, 0xfffffffe },
- { 0x02052c, 0xfffffffa },
- { 0x02052c, 0xfffffff0 },
- { 0x02052c, 0xffffffc0 },
- { 0x02052c, 0xffffff00 },
- { 0x02052c, 0xfffffc00 },
- { 0x02052c, 0xfffcfc00 },
- { 0x02052c, 0xfff0fc00 },
- { 0x02052c, 0xff80fc00 },
- { 0x020528, 0xfffffffe },
- { 0x020528, 0xfffffffc },
- };
- int i;
-
- nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
- nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(magic); i++) {
- nv_wr32(priv, magic[i].addr, magic[i].data);
- nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
- }
-
- return nvkm_gr_fini(&priv->base, suspend);
-}
-
#include "fuc/hubgk110.fuc3.h"
struct gf100_gr_ucode
@@ -234,10 +197,10 @@ struct nvkm_oclass *
gk110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf0),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
- .fini = gk110_gr_fini,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gk110_grctx_oclass,
.sclass = gk110_gr_sclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 5292c5a9a38c..9ff9eab0ccaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -102,10 +102,10 @@ struct nvkm_oclass *
gk110b_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf1),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
- .fini = gk110_gr_fini,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gk110b_grctx_oclass,
.sclass = gk110_gr_sclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index ae6b853173b6..85f44a3d5d11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -34,10 +34,10 @@
static struct nvkm_oclass
gk208_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa140, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs },
- { 0xa1c0, &nvkm_object_ofuncs },
+ { KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
@@ -152,43 +152,6 @@ gk208_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
-gk208_gr_fini(struct nvkm_object *object, bool suspend)
-{
- struct gf100_gr_priv *priv = (void *)object;
- static const struct {
- u32 addr;
- u32 data;
- } magic[] = {
- { 0x020520, 0xfffffffc },
- { 0x020524, 0xfffffffe },
- { 0x020524, 0xfffffffc },
- { 0x020524, 0xfffffff8 },
- { 0x020524, 0xffffffe0 },
- { 0x020530, 0xfffffffe },
- { 0x02052c, 0xfffffffa },
- { 0x02052c, 0xfffffff0 },
- { 0x02052c, 0xffffffc0 },
- { 0x02052c, 0xffffff00 },
- { 0x02052c, 0xfffffc00 },
- { 0x02052c, 0xfffcfc00 },
- { 0x02052c, 0xfff0fc00 },
- { 0x02052c, 0xff80fc00 },
- { 0x020528, 0xfffffffe },
- { 0x020528, 0xfffffffc },
- };
- int i;
-
- nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
- nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(magic); i++) {
- nv_wr32(priv, magic[i].addr, magic[i].data);
- nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
- }
-
- return nvkm_gr_fini(&priv->base, suspend);
-}
-
#include "fuc/hubgk208.fuc5.h"
static struct gf100_gr_ucode
@@ -213,10 +176,10 @@ struct nvkm_oclass *
gk208_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x08),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
- .fini = gk208_gr_fini,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gk208_grctx_oclass,
.sclass = gk208_gr_sclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 213755534084..40ff5eb9180c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -26,8 +26,8 @@
static struct nvkm_oclass
gk20a_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa040, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 124492b8a2d6..a5ebd459bc24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -35,8 +35,8 @@
static struct nvkm_oclass
gm107_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa140, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -71,7 +71,7 @@ gm107_gr_init_ds_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_scc_0[] = {
{ 0x40803c, 1, 0x04, 0x00000010 },
{}
@@ -85,14 +85,14 @@ gm107_gr_init_sked_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_prop_0[] = {
{ 0x418408, 1, 0x04, 0x00000000 },
{ 0x4184a0, 1, 0x04, 0x00000000 },
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_setup_1[] = {
{ 0x4188c8, 2, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
@@ -100,7 +100,7 @@ gm107_gr_init_setup_1[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_zcull_0[] = {
{ 0x418910, 1, 0x04, 0x00010001 },
{ 0x418914, 1, 0x04, 0x00000301 },
@@ -111,7 +111,7 @@ gm107_gr_init_zcull_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000400 },
@@ -134,7 +134,7 @@ gm107_gr_init_tpccs_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -160,7 +160,7 @@ gm107_gr_init_pe_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419cc0, 2, 0x04, 0x00000000 },
@@ -206,14 +206,14 @@ gm107_gr_init_pes_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_wwdx_0[] = {
{ 0x41bfd4, 1, 0x04, 0x00800000 },
{ 0x41bfdc, 1, 0x04, 0x00000000 },
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_cbm_0[] = {
{ 0x41becc, 1, 0x04, 0x00000000 },
{}
@@ -291,7 +291,7 @@ gm107_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static void
+void
gm107_gr_init_bios(struct gf100_gr_priv *priv)
{
static const struct {
@@ -464,7 +464,7 @@ gm107_gr_oclass = &(struct gf100_gr_oclass) {
.cclass = &gm107_grctx_oclass,
.sclass = gm107_gr_sclass,
.mmio = gm107_gr_pack_mmio,
- .fecs.ucode = 0 ? &gm107_gr_fecs_ucode : NULL,
+ .fecs.ucode = &gm107_gr_fecs_ucode,
.gpccs.ucode = &gm107_gr_gpccs_ucode,
.ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
new file mode 100644
index 000000000000..2f5eadd12a9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+struct nvkm_oclass
+gm204_gr_sclass[] = {
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
+ { MAXWELL_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { MAXWELL_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH register lists
+ ******************************************************************************/
+
+static const struct gf100_gr_init
+gm204_gr_init_main_0[] = {
+ { 0x400080, 1, 0x04, 0x003003e2 },
+ { 0x400088, 1, 0x04, 0xe007bfe7 },
+ { 0x40008c, 1, 0x04, 0x00060000 },
+ { 0x400090, 1, 0x04, 0x00000030 },
+ { 0x40013c, 1, 0x04, 0x003901f3 },
+ { 0x400140, 1, 0x04, 0x00000100 },
+ { 0x400144, 1, 0x04, 0x00000000 },
+ { 0x400148, 1, 0x04, 0x00000110 },
+ { 0x400138, 1, 0x04, 0x00000000 },
+ { 0x400130, 2, 0x04, 0x00000000 },
+ { 0x400124, 1, 0x04, 0x00000002 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_fe_0[] = {
+ { 0x40415c, 1, 0x04, 0x00000000 },
+ { 0x404170, 1, 0x04, 0x00000000 },
+ { 0x4041b4, 1, 0x04, 0x00000000 },
+ { 0x4041b8, 1, 0x04, 0x00000010 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_ds_0[] = {
+ { 0x40583c, 1, 0x04, 0x00000000 },
+ { 0x405844, 1, 0x04, 0x00ffffff },
+ { 0x40584c, 1, 0x04, 0x00000001 },
+ { 0x405850, 1, 0x04, 0x00000000 },
+ { 0x405900, 1, 0x04, 0x00000000 },
+ { 0x405908, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_sked_0[] = {
+ { 0x407010, 1, 0x04, 0x00000000 },
+ { 0x407040, 1, 0x04, 0x80440434 },
+ { 0x407048, 1, 0x04, 0x00000008 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_tpccs_0[] = {
+ { 0x419d60, 1, 0x04, 0x0000003f },
+ { 0x419d88, 3, 0x04, 0x00000000 },
+ { 0x419dc4, 1, 0x04, 0x00000000 },
+ { 0x419dc8, 1, 0x04, 0x00000501 },
+ { 0x419dd0, 1, 0x04, 0x00000000 },
+ { 0x419dd4, 1, 0x04, 0x00000100 },
+ { 0x419dd8, 1, 0x04, 0x00000001 },
+ { 0x419ddc, 1, 0x04, 0x00000002 },
+ { 0x419de0, 1, 0x04, 0x00000001 },
+ { 0x419de8, 1, 0x04, 0x000000cc },
+ { 0x419dec, 1, 0x04, 0x00000000 },
+ { 0x419df0, 1, 0x04, 0x000000cc },
+ { 0x419df4, 1, 0x04, 0x00000000 },
+ { 0x419d0c, 1, 0x04, 0x00000000 },
+ { 0x419d10, 1, 0x04, 0x00000014 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_pe_0[] = {
+ { 0x419900, 1, 0x04, 0x000000ff },
+ { 0x419810, 1, 0x04, 0x00000000 },
+ { 0x41980c, 1, 0x04, 0x00000010 },
+ { 0x419844, 1, 0x04, 0x00000000 },
+ { 0x419838, 1, 0x04, 0x000000ff },
+ { 0x419850, 1, 0x04, 0x00000004 },
+ { 0x419854, 2, 0x04, 0x00000000 },
+ { 0x419894, 3, 0x04, 0x00100401 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_sm_0[] = {
+ { 0x419e30, 1, 0x04, 0x000000ff },
+ { 0x419e00, 1, 0x04, 0x00000000 },
+ { 0x419ea0, 1, 0x04, 0x00000000 },
+ { 0x419ee4, 1, 0x04, 0x00000000 },
+ { 0x419ea4, 1, 0x04, 0x00000100 },
+ { 0x419ea8, 1, 0x04, 0x00000000 },
+ { 0x419ee8, 1, 0x04, 0x00000091 },
+ { 0x419eb4, 1, 0x04, 0x00000000 },
+ { 0x419ebc, 2, 0x04, 0x00000000 },
+ { 0x419edc, 1, 0x04, 0x000c1810 },
+ { 0x419ed8, 1, 0x04, 0x00000000 },
+ { 0x419ee0, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_l1c_1[] = {
+ { 0x419cf8, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_sm_1[] = {
+ { 0x419f74, 1, 0x04, 0x00055155 },
+ { 0x419f80, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_l1c_2[] = {
+ { 0x419ccc, 2, 0x04, 0x00000000 },
+ { 0x419c80, 1, 0x04, 0x3f006022 },
+ { 0x419c88, 1, 0x04, 0x00210000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_pes_0[] = {
+ { 0x41be50, 1, 0x04, 0x000000ff },
+ { 0x41be04, 1, 0x04, 0x00000000 },
+ { 0x41be08, 1, 0x04, 0x00000004 },
+ { 0x41be0c, 1, 0x04, 0x00000008 },
+ { 0x41be10, 1, 0x04, 0x2e3b8bc7 },
+ { 0x41be14, 2, 0x04, 0x00000000 },
+ { 0x41be3c, 5, 0x04, 0x00100401 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_be_0[] = {
+ { 0x408890, 1, 0x04, 0x000000ff },
+ { 0x40880c, 1, 0x04, 0x00000000 },
+ { 0x408850, 1, 0x04, 0x00000004 },
+ { 0x408878, 1, 0x04, 0x01b4201c },
+ { 0x40887c, 1, 0x04, 0x80004c55 },
+ { 0x408880, 1, 0x04, 0x0018c258 },
+ { 0x408884, 1, 0x04, 0x0000160f },
+ { 0x408974, 1, 0x04, 0x000000ff },
+ { 0x408910, 9, 0x04, 0x00000000 },
+ { 0x408950, 1, 0x04, 0x00000000 },
+ { 0x408954, 1, 0x04, 0x0000ffff },
+ { 0x408958, 1, 0x04, 0x00000034 },
+ { 0x40895c, 1, 0x04, 0x84b17403 },
+ { 0x408960, 1, 0x04, 0x04c1884f },
+ { 0x408964, 1, 0x04, 0x04714445 },
+ { 0x408968, 1, 0x04, 0x0280802f },
+ { 0x40896c, 1, 0x04, 0x04304856 },
+ { 0x408970, 1, 0x04, 0x00012800 },
+ { 0x408984, 1, 0x04, 0x00000000 },
+ { 0x408988, 1, 0x04, 0x08040201 },
+ { 0x40898c, 1, 0x04, 0x80402010 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_gr_pack_mmio[] = {
+ { gm204_gr_init_main_0 },
+ { gm204_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf100_gr_init_pd_0 },
+ { gm204_gr_init_ds_0 },
+ { gm107_gr_init_scc_0 },
+ { gm204_gr_init_sked_0 },
+ { gk110_gr_init_cwd_0 },
+ { gm107_gr_init_prop_0 },
+ { gk208_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gm107_gr_init_setup_1 },
+ { gm107_gr_init_zcull_0 },
+ { gf100_gr_init_gpm_0 },
+ { gm107_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gm204_gr_init_tpccs_0 },
+ { gm107_gr_init_tex_0 },
+ { gm204_gr_init_pe_0 },
+ { gm107_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gm204_gr_init_sm_0 },
+ { gm204_gr_init_l1c_1 },
+ { gm204_gr_init_sm_1 },
+ { gm204_gr_init_l1c_2 },
+ { gm204_gr_init_pes_0 },
+ { gm107_gr_init_wwdx_0 },
+ { gm107_gr_init_cbm_0 },
+ { gm204_gr_init_be_0 },
+ {}
+};
+
+const struct gf100_gr_pack *
+gm204_gr_data[] = {
+ gm204_gr_pack_mmio,
+ NULL
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+gm204_gr_init_ctxctl(struct gf100_gr_priv *priv)
+{
+ return 0;
+}
+
+int
+gm204_gr_init(struct nvkm_object *object)
+{
+ struct gf100_gr_oclass *oclass = (void *)object->oclass;
+ struct gf100_gr_priv *priv = (void *)object;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, ppc, rop;
+ int ret, i;
+ u32 tmp;
+
+ ret = nvkm_gr_init(&priv->base);
+ if (ret)
+ return ret;
+
+ tmp = nv_rd32(priv, 0x100c80); /*XXX: mask? */
+ nv_wr32(priv, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+ nv_wr32(priv, 0x418890, 0x00000000);
+ nv_wr32(priv, 0x418894, 0x00000000);
+ nv_wr32(priv, 0x4188b4, priv->unk4188b4->addr >> 8);
+ nv_wr32(priv, 0x4188b8, priv->unk4188b8->addr >> 8);
+ nv_mask(priv, 0x4188b0, 0x00040000, 0x00040000);
+
+ /*XXX: belongs in fb */
+ nv_wr32(priv, 0x100cc8, priv->unk4188b4->addr >> 8);
+ nv_wr32(priv, 0x100ccc, priv->unk4188b8->addr >> 8);
+ nv_mask(priv, 0x100cc4, 0x00040000, 0x00040000);
+
+ gf100_gr_mmio(priv, oclass->mmio);
+
+ gm107_gr_init_bios(priv);
+
+ nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
+ priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ priv->tpc_total);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
+ nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+ nv_wr32(priv, GPC_BCAST(0x033c), nv_rd32(priv, 0x100804));
+
+ nv_wr32(priv, 0x400500, 0x00010001);
+ nv_wr32(priv, 0x400100, 0xffffffff);
+ nv_wr32(priv, 0x40013c, 0xffffffff);
+ nv_wr32(priv, 0x400124, 0x00000002);
+ nv_wr32(priv, 0x409c24, 0x000e0000);
+ nv_wr32(priv, 0x405848, 0xc0000000);
+ nv_wr32(priv, 0x40584c, 0x00000001);
+ nv_wr32(priv, 0x404000, 0xc0000000);
+ nv_wr32(priv, 0x404600, 0xc0000000);
+ nv_wr32(priv, 0x408030, 0xc0000000);
+ nv_wr32(priv, 0x404490, 0xc0000000);
+ nv_wr32(priv, 0x406018, 0xc0000000);
+ nv_wr32(priv, 0x407020, 0x40000000);
+ nv_wr32(priv, 0x405840, 0xc0000000);
+ nv_wr32(priv, 0x405844, 0x00ffffff);
+ nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_debug(priv, "ppc %d %d\n", gpc, priv->ppc_nr[gpc]);
+ for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
+ nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+ }
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+
+ nv_wr32(priv, 0x400108, 0xffffffff);
+ nv_wr32(priv, 0x400138, 0xffffffff);
+ nv_wr32(priv, 0x400118, 0xffffffff);
+ nv_wr32(priv, 0x400130, 0xffffffff);
+ nv_wr32(priv, 0x40011c, 0xffffffff);
+ nv_wr32(priv, 0x400134, 0xffffffff);
+
+ nv_wr32(priv, 0x400054, 0x2c350f63);
+
+ gf100_gr_zbc_init(priv);
+
+ return gm204_gr_init_ctxctl(priv);
+}
+
+struct nvkm_oclass *
+gm204_gr_oclass = &(struct gf100_gr_oclass) {
+ .base.handle = NV_ENGINE(GR, 0x24),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gm204_gr_init,
+ .fini = _nvkm_gr_fini,
+ },
+ .cclass = &gm204_grctx_oclass,
+ .sclass = gm204_gr_sclass,
+ .mmio = gm204_gr_pack_mmio,
+ .ppc_nr = 2,
+}.base;
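
Editor's note: the GPC_BCAST(0x0980..0x098c) words written in gm204_gr_init() above are built by round-robining the TPCs across the GPCs and packing each TPC's per-GPC index as a 4-bit field, eight fields per 32-bit word. A minimal standalone sketch of that packing, with hypothetical names and sizes fixed for illustration:

#include <stdio.h>
#include <stdint.h>

#define GPC_MAX 4	/* illustration only; the driver sizes this per chipset */

/*
 * Round-robin TPC distribution, as in gm204_gr_init() above: visit the GPCs
 * in turn, give each enabled TPC an index local to its GPC, and pack the
 * indices as 4-bit fields, eight per 32-bit broadcast word.
 */
static void
pack_tpc_indices(const uint8_t *tpc_nr, int gpc_nr, int tpc_total,
		 uint32_t *data)
{
	uint8_t tpcnr[GPC_MAX];
	int i, gpc = -1, tpc;

	for (i = 0; i < gpc_nr; i++)
		tpcnr[i] = tpc_nr[i];

	for (i = 0; i < tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;
		data[i / 8] |= tpc << ((i % 8) * 4);
	}
}

int
main(void)
{
	const uint8_t tpc_nr[2] = { 2, 2 };	/* two GPCs, two TPCs each */
	uint32_t data[1] = { 0 };

	pack_tpc_indices(tpc_nr, 2, 4, data);
	printf("0x%08x\n", (unsigned)data[0]);	/* 0x00001100: indices 0,0,1,1 */
	return 0;
}
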
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
new file mode 100644
index 000000000000..04b9733d146a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+struct nvkm_oclass *
+gm206_gr_oclass = &(struct gf100_gr_oclass) {
+ .base.handle = NV_ENGINE(GR, 0x26),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gm204_gr_init,
+ .fini = _nvkm_gr_fini,
+ },
+ .cclass = &gm206_grctx_oclass,
+ .sclass = gm204_gr_sclass,
+ .mmio = gm204_gr_pack_mmio,
+ .ppc_nr = 2,
+}.base;
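
Editor's note: gm206 reuses gm204's init path, sclass table, and mmio lists wholesale, swapping only the context class and engine handle. The oclass itself is exposed through an idiom used throughout nvkm: a pointer to the .base member of a file-scope compound literal, which has static storage duration. A minimal sketch of that pattern, with hypothetical types and names:

#include <stdio.h>

struct base { int handle; };
struct derived {
	struct base base;
	const char *name;
};

/*
 * A file-scope compound literal has static storage duration, so taking the
 * address of one of its members yields a pointer valid for the program's
 * lifetime -- exactly what the gm204/gm206 oclass definitions rely on.
 */
struct base *example_oclass = &(struct derived) {
	.base.handle = 0x26,
	.name = "gm206",
}.base;

int
main(void)
{
	printf("handle 0x%x\n", example_oclass->handle);
	return 0;
}
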
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 1fbd93bbb561..f9d0eb5647fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -52,7 +52,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 start = offset & ~0x00000fff;
u32 fetch = limit - start;
- if (nvbios_extend(bios, limit) > 0) {
+ if (nvbios_extend(bios, limit) >= 0) {
int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
if (ret == fetch)
return fetch;
@@ -73,7 +73,7 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 start = offset & ~0xfff;
u32 fetch = 0;
- if (nvbios_extend(bios, limit) > 0) {
+ if (nvbios_extend(bios, limit) >= 0) {
while (start + fetch < limit) {
int ret = nouveau_acpi_get_bios_chunk(bios->data,
start + fetch,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index b8853bf16b23..7622b41619a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -29,7 +29,7 @@ struct nvkm_hwsq {
u32 data;
struct {
u8 data[512];
- u8 size;
+ u16 size;
} c;
};
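
Editor's note: widening c.size from u8 to u16 matters because the staging buffer above is 512 bytes: a u8 counter wraps at 256, so any sequence longer than 255 bytes would silently corrupt the size tracking. A trivial standalone demonstration of the wrap:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t  size8  = 0;
	uint16_t size16 = 0;
	int i;

	/* queue 300 bytes into a 512-byte buffer */
	for (i = 0; i < 300; i++) {
		size8++;
		size16++;
	}
	/* prints "u8: 44, u16: 300" -- the u8 counter has wrapped */
	printf("u8: %u, u16: %u\n", (unsigned)size8, (unsigned)size16);
	return 0;
}
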
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 3394a5ea8a9f..ebf709c27e3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -11,17 +11,34 @@ struct hwsq {
struct hwsq_reg {
int sequence;
bool force;
- u32 addr[2];
+ u32 addr;
+ u32 stride; /* in bytes */
+ u32 mask;
u32 data;
};
static inline struct hwsq_reg
+hwsq_stride(u32 addr, u32 stride, u32 mask)
+{
+ return (struct hwsq_reg) {
+ .sequence = 0,
+ .force = 0,
+ .addr = addr,
+ .stride = stride,
+ .mask = mask,
+ .data = 0xdeadbeef,
+ };
+}
+
+static inline struct hwsq_reg
hwsq_reg2(u32 addr1, u32 addr2)
{
return (struct hwsq_reg) {
.sequence = 0,
.force = 0,
- .addr = { addr1, addr2 },
+ .addr = addr1,
+ .stride = addr2 - addr1,
+ .mask = 0x3,
.data = 0xdeadbeef,
};
}
@@ -29,7 +46,14 @@ hwsq_reg2(u32 addr1, u32 addr2)
static inline struct hwsq_reg
hwsq_reg(u32 addr)
{
- return hwsq_reg2(addr, addr);
+ return (struct hwsq_reg) {
+ .sequence = 0,
+ .force = 0,
+ .addr = addr,
+ .stride = 0,
+ .mask = 0x1,
+ .data = 0xdeadbeef,
+ };
}
static inline int
@@ -62,18 +86,24 @@ static inline u32
hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
{
if (reg->sequence != ram->sequence)
- reg->data = nv_rd32(ram->subdev, reg->addr[0]);
+ reg->data = nv_rd32(ram->subdev, reg->addr);
return reg->data;
}
static inline void
hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
{
+ u32 mask, off = 0;
+
reg->sequence = ram->sequence;
reg->data = data;
- if (reg->addr[0] != reg->addr[1])
- nvkm_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
- nvkm_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
+
+ for (mask = reg->mask; mask > 0; mask = (mask & ~1) >> 1) {
+ if (mask & 1)
+ nvkm_hwsq_wr32(ram->hwsq, reg->addr+off, reg->data);
+
+ off += reg->stride;
+ }
}
static inline void
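
Editor's note: the reworked hwsq_wr32() above walks the mask, issuing one write per set bit at addr plus that bit's index times stride. hwsq_reg() degenerates to mask 0x1 (a single write), hwsq_reg2() to mask 0x3 with stride addr2 - addr1 (two writes), and hwsq_stride() covers arbitrary strided register blocks. A standalone sketch of the walk, with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the hwsq_wr32() loop: for each set bit in mask, "write" the
 * value at addr + bit_index * stride. Here we just print the addresses.
 */
static void
strided_writes(uint32_t addr, uint32_t stride, uint32_t mask)
{
	uint32_t off = 0;

	for (; mask > 0; mask = (mask & ~1u) >> 1) {
		if (mask & 1)
			printf("wr32 0x%06x\n", (unsigned)(addr + off));
		off += stride;
	}
}

int
main(void)
{
	/* mask 0x5 with stride 0x10 hits 0x100200 and 0x100220 */
	strided_writes(0x100200, 0x10, 0x5);
	return 0;
}
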
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index b24a9cc04b73..39a83d82e0cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -184,7 +184,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nv_debug(clk, "setting performance state %d\n", pstatei);
clk->pstate = pstatei;
- if (pfb->ram->calc) {
+ if (pfb->ram && pfb->ram->calc) {
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = pfb->ram->calc(pfb, khz);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 14a51a9ff7d0..7c63abf11e22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -5,7 +5,7 @@ struct nvkm_pll_vals;
struct nv04_devinit_priv {
struct nvkm_devinit base;
- u8 owner;
+ int owner;
};
int nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 904d601e8a50..d6be4c6c5408 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -37,7 +37,6 @@ nvkm-y += nvkm/subdev/fb/ramgt215.o
nvkm-y += nvkm/subdev/fb/rammcp77.o
nvkm-y += nvkm/subdev/fb/ramgf100.o
nvkm-y += nvkm/subdev/fb/ramgk104.o
-nvkm-y += nvkm/subdev/fb/ramgk20a.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 16589fa613cd..61fde43dab71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -55,9 +55,11 @@ _nvkm_fb_fini(struct nvkm_object *object, bool suspend)
struct nvkm_fb *pfb = (void *)object;
int ret;
- ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
- if (ret && suspend)
- return ret;
+ if (pfb->ram) {
+ ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
+ if (ret && suspend)
+ return ret;
+ }
return nvkm_subdev_fini(&pfb->base, suspend);
}
@@ -72,9 +74,11 @@ _nvkm_fb_init(struct nvkm_object *object)
if (ret)
return ret;
- ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
- if (ret)
- return ret;
+ if (pfb->ram) {
+ ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
+ if (ret)
+ return ret;
+ }
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
@@ -91,9 +95,12 @@ _nvkm_fb_dtor(struct nvkm_object *object)
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
nvkm_mm_fini(&pfb->tags);
- nvkm_mm_fini(&pfb->vram);
- nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
+ if (pfb->ram) {
+ nvkm_mm_fini(&pfb->vram);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
+ }
+
nvkm_subdev_destroy(&pfb->base);
}
@@ -127,6 +134,9 @@ nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
pfb->memtype_valid = impl->memtype;
+ if (!impl->ram)
+ return 0;
+
ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
if (ret) {
nv_fatal(pfb, "error detecting memory configuration!!\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 6762847c05e8..a5d7857d3898 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -65,5 +65,4 @@ gk20a_fb_oclass = &(struct nvkm_fb_impl) {
.fini = _nvkm_fb_fini,
},
.memtype = gf100_fb_memtype_valid,
- .ram = &gk20a_ram_oclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index d82da02daa1f..485c4b64819a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -32,7 +32,6 @@ extern struct nvkm_oclass gt215_ram_oclass;
extern struct nvkm_oclass mcp77_ram_oclass;
extern struct nvkm_oclass gf100_ram_oclass;
extern struct nvkm_oclass gk104_ram_oclass;
-extern struct nvkm_oclass gk20a_ram_oclass;
extern struct nvkm_oclass gm107_ram_oclass;
int nvkm_sddr2_calc(struct nvkm_ram *ram);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c
deleted file mode 100644
index 5f30db140b47..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/device.h>
-
-struct gk20a_mem {
- struct nvkm_mem base;
- void *cpuaddr;
- dma_addr_t handle;
-};
-#define to_gk20a_mem(m) container_of(m, struct gk20a_mem, base)
-
-static void
-gk20a_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
-{
- struct device *dev = nv_device_base(nv_device(pfb));
- struct gk20a_mem *mem = to_gk20a_mem(*pmem);
-
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- if (likely(mem->cpuaddr))
- dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
- mem->cpuaddr, mem->handle);
-
- kfree(mem->base.pages);
- kfree(mem);
-}
-
-static int
-gk20a_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nvkm_mem **pmem)
-{
- struct device *dev = nv_device_base(nv_device(pfb));
- struct gk20a_mem *mem;
- u32 type = memtype & 0xff;
- u32 npages, order;
- int i;
-
- nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
- align, ncmin);
-
- npages = size >> PAGE_SHIFT;
- if (npages == 0)
- npages = 1;
-
- if (align == 0)
- align = PAGE_SIZE;
- align >>= PAGE_SHIFT;
-
- /* round alignment to the next power of 2, if needed */
- order = fls(align);
- if ((align & (align - 1)) == 0)
- order--;
- align = BIT(order);
-
- /* ensure returned address is correctly aligned */
- npages = max(align, npages);
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- mem->base.size = npages;
- mem->base.memtype = type;
-
- mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
- if (!mem->base.pages) {
- kfree(mem);
- return -ENOMEM;
- }
-
- *pmem = &mem->base;
-
- mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
- &mem->handle, GFP_KERNEL);
- if (!mem->cpuaddr) {
- nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
- gk20a_ram_put(pfb, pmem);
- return -ENOMEM;
- }
-
- align <<= PAGE_SHIFT;
-
- /* alignment check */
- if (unlikely(mem->handle & (align - 1)))
- nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
- &mem->handle, align);
-
- nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
- npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);
-
- for (i = 0; i < npages; i++)
- mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);
-
- mem->base.offset = (u64)mem->base.pages[0];
- return 0;
-}
-
-static int
-gk20a_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 datasize,
- struct nvkm_object **pobject)
-{
- struct nvkm_ram *ram;
- int ret;
-
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->size = get_num_physpages() << PAGE_SHIFT;
-
- ram->get = gk20a_ram_get;
- ram->put = gk20a_ram_put;
- return 0;
-}
-
-struct nvkm_oclass
-gk20a_ram_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_ram_ctor,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index ba19158a5912..0b256aa4960f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -45,10 +45,8 @@ gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ret = nvkm_fuse_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- if (ret)
- return ret;
- return 0;
+ return ret;
}
struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index e6f35abe7879..13bb7fc0a569 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -2,3 +2,4 @@ nvkm-y += nvkm/subdev/instmem/base.o
nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
nvkm-y += nvkm/subdev/instmem/nv50.o
+nvkm-y += nvkm/subdev/instmem/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
new file mode 100644
index 000000000000..dd0994d9ebfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * GK20A does not have dedicated video memory, and to accurately represent this
+ * fact Nouveau will not create a RAM device for it. Therefore its instmem
+ * implementation must be done directly on top of system memory, while providing
+ * coherent read and write operations.
+ *
+ * Instmem can be allocated through two means:
+ * 1) If an IOMMU mapping has been probed, the IOMMU API is used to make the
+ * memory pages appear contiguous to the GPU. This is the preferred way.
+ * 2) If no IOMMU mapping is probed, the DMA API is used to allocate physically
+ * contiguous memory.
+ *
+ * In both cases CPU reads and writes are performed using PRAMIN (i.e. using the
+ * GPU path) to ensure these operations are coherent for the GPU. This allows us
+ * to use more "relaxed" allocation parameters when using the DMA API, since we
+ * never need a kernel mapping.
+ */
+
+#include <subdev/fb.h>
+#include <core/mm.h>
+#include <core/device.h>
+
+#ifdef __KERNEL__
+#include <linux/dma-attrs.h>
+#include <linux/iommu.h>
+#include <nouveau_platform.h>
+#endif
+
+#include "priv.h"
+
+struct gk20a_instobj_priv {
+ struct nvkm_instobj base;
+ /* Must be second member here - see nouveau_gpuobj_map_vm() */
+ struct nvkm_mem *mem;
+ /* Pointed to by mem */
+ struct nvkm_mem _mem;
+};
+
+/*
+ * Used for objects allocated using the DMA API
+ */
+struct gk20a_instobj_dma {
+ struct gk20a_instobj_priv base;
+
+ void *cpuaddr;
+ dma_addr_t handle;
+ struct nvkm_mm_node r;
+};
+
+/*
+ * Used for objects flattened using the IOMMU API
+ */
+struct gk20a_instobj_iommu {
+ struct gk20a_instobj_priv base;
+
+ /* array of base.mem->size pages */
+ struct page *pages[];
+};
+
+struct gk20a_instmem_priv {
+ struct nvkm_instmem base;
+ spinlock_t lock;
+ u64 addr;
+
+ /* Only used if IOMMU is present */
+ struct mutex *mm_mutex;
+ struct nvkm_mm *mm;
+ struct iommu_domain *domain;
+ unsigned long iommu_pgshift;
+
+ /* Only used by DMA API */
+ struct dma_attrs attrs;
+};
+
+/*
+ * Use PRAMIN to read/write data and avoid coherency issues.
+ * PRAMIN uses the GPU path and ensures data will always be coherent.
+ *
+ * A solution based on dynamic mappings would be desirable in the future, but
+ * the question of how to maintain coherency efficiently remains open. On ARM
+ * it is not easy (if possible at all) to create uncached temporary mappings.
+ */
+
+static u32
+gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+{
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
+ struct gk20a_instobj_priv *node = (void *)object;
+ unsigned long flags;
+ u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+ u32 data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (unlikely(priv->addr != base)) {
+ nv_wr32(priv, 0x001700, base >> 16);
+ priv->addr = base;
+ }
+ data = nv_rd32(priv, 0x700000 + addr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return data;
+}
+
+static void
+gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+{
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
+ struct gk20a_instobj_priv *node = (void *)object;
+ unsigned long flags;
+ u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (unlikely(priv->addr != base)) {
+ nv_wr32(priv, 0x001700, base >> 16);
+ priv->addr = base;
+ }
+ nv_wr32(priv, 0x700000 + addr, data);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+{
+ struct gk20a_instobj_dma *node = (void *)_node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct device *dev = nv_device_base(nv_device(priv));
+
+ if (unlikely(!node->cpuaddr))
+ return;
+
+ dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
+ node->handle, &priv->attrs);
+}
+
+static void
+gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+{
+ struct gk20a_instobj_iommu *node = (void *)_node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct nvkm_mm_node *r;
+ int i;
+
+ if (unlikely(list_empty(&_node->mem->regions)))
+ return;
+
+ r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+ rl_entry);
+
+ /* clear bit 34 to unmap pages */
+ r->offset &= ~BIT(34 - priv->iommu_pgshift);
+
+ /* Unmap pages from GPU address space and free them */
+ for (i = 0; i < _node->mem->size; i++) {
+ iommu_unmap(priv->domain,
+ (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+ __free_page(node->pages[i]);
+ }
+
+ /* Release area from GPU address space */
+ mutex_lock(priv->mm_mutex);
+ nvkm_mm_free(priv->mm, &r);
+ mutex_unlock(priv->mm_mutex);
+}
+
+static void
+gk20a_instobj_dtor(struct nvkm_object *object)
+{
+ struct gk20a_instobj_priv *node = (void *)object;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+
+ if (priv->domain)
+ gk20a_instobj_dtor_iommu(node);
+ else
+ gk20a_instobj_dtor_dma(node);
+
+ nvkm_instobj_destroy(&node->base);
+}
+
+static int
+gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 npages, u32 align,
+ struct gk20a_instobj_priv **_node)
+{
+ struct gk20a_instobj_dma *node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct device *dev = nv_device_base(nv_device(parent));
+ int ret;
+
+ ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
+ (void **)&node);
+ *_node = &node->base;
+ if (ret)
+ return ret;
+
+ node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
+ &node->handle, GFP_KERNEL,
+ &priv->attrs);
+ if (!node->cpuaddr) {
+ nv_error(priv, "cannot allocate DMA memory\n");
+ return -ENOMEM;
+ }
+
+ /* alignment check */
+ if (unlikely(node->handle & (align - 1)))
+ nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
+ &node->handle, align);
+
+ /* present the memory so it can be mapped using small pages */
+ node->r.type = 12;
+ node->r.offset = node->handle >> 12;
+ node->r.length = (npages << PAGE_SHIFT) >> 12;
+
+ node->base._mem.offset = node->handle;
+
+ INIT_LIST_HEAD(&node->base._mem.regions);
+ list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+
+ return 0;
+}
+
+static int
+gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 npages, u32 align,
+ struct gk20a_instobj_priv **_node)
+{
+ struct gk20a_instobj_iommu *node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct nvkm_mm_node *r;
+ int ret;
+ int i;
+
+ ret = nvkm_instobj_create_(parent, engine, oclass,
+ sizeof(*node) + sizeof(node->pages[0]) * npages,
+ (void **)&node);
+ *_node = &node->base;
+ if (ret)
+ return ret;
+
+ /* Allocate backing memory */
+ for (i = 0; i < npages; i++) {
+ struct page *p = alloc_page(GFP_KERNEL);
+
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+ node->pages[i] = p;
+ }
+
+ mutex_lock(priv->mm_mutex);
+ /* Reserve area from GPU address space */
+ ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
+ align >> priv->iommu_pgshift, &r);
+ mutex_unlock(priv->mm_mutex);
+ if (ret) {
+ nv_error(priv, "virtual space is full!\n");
+ goto free_pages;
+ }
+
+ /* Map into GPU address space */
+ for (i = 0; i < npages; i++) {
+ struct page *p = node->pages[i];
+ u32 offset = (r->offset + i) << priv->iommu_pgshift;
+
+ ret = iommu_map(priv->domain, offset, page_to_phys(p),
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+
+ while (i-- > 0) {
+ offset -= PAGE_SIZE;
+ iommu_unmap(priv->domain, offset, PAGE_SIZE);
+ }
+ goto release_area;
+ }
+ }
+
+ /* Bit 34 indicates that an address is to be resolved through the IOMMU */
+ r->offset |= BIT(34 - priv->iommu_pgshift);
+
+ node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+
+ INIT_LIST_HEAD(&node->base._mem.regions);
+ list_add_tail(&r->rl_entry, &node->base._mem.regions);
+
+ return 0;
+
+release_area:
+ mutex_lock(priv->mm_mutex);
+ nvkm_mm_free(priv->mm, &r);
+ mutex_unlock(priv->mm_mutex);
+
+free_pages:
+ for (i = 0; i < npages && node->pages[i] != NULL; i++)
+ __free_page(node->pages[i]);
+
+ return ret;
+}
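
The bit-34 tagging used above is easier to see with numbers. A brief illustration (the values and the 12-bit page shift are assumptions): because r->offset is kept in IOMMU-page units, setting bit (34 - pgshift) there is equivalent to setting bit 34 of the byte address once shifted back, which marks the address as one the IOMMU must translate; the destructor clears the same bit before freeing.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const unsigned pgshift = 12;        /* assumed IOMMU page shift */
            uint64_t pages = 0x100;             /* offset in IOMMU-page units */

            pages |= 1ULL << (34 - pgshift);    /* tag: resolve through IOMMU */
            uint64_t byte_addr = pages << pgshift; /* -> node->base._mem.offset */
            assert(byte_addr & (1ULL << 34));   /* bit 34 set in the byte address */
            printf("tagged address 0x%llx\n", (unsigned long long)byte_addr);

            pages &= ~(1ULL << (34 - pgshift)); /* dtor clears it before freeing */
            assert(!((pages << pgshift) & (1ULL << 34)));
            return 0;
    }
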
+
+static int
+gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 _size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_instobj_args *args = data;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct gk20a_instobj_priv *node;
+ u32 size, align;
+ int ret;
+
+ nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
+ priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+
+ /* Round size and align to page bounds */
+ size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
+ align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+
+ if (priv->domain)
+ ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
+ size >> PAGE_SHIFT, align, &node);
+ else
+ ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
+ size >> PAGE_SHIFT, align, &node);
+ *pobject = nv_object(node);
+ if (ret)
+ return ret;
+
+ node->mem = &node->_mem;
+
+ /* present the memory so it can be mapped using small pages */
+ node->mem->size = size >> 12;
+ node->mem->memtype = 0;
+ node->mem->page_shift = 12;
+
+ node->base.addr = node->mem->offset;
+ node->base.size = size;
+
+ nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+ size, align, node->mem->offset);
+
+ return 0;
+}
+
+static struct nvkm_instobj_impl
+gk20a_instobj_oclass = {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk20a_instobj_ctor,
+ .dtor = gk20a_instobj_dtor,
+ .init = _nvkm_instobj_init,
+ .fini = _nvkm_instobj_fini,
+ .rd32 = gk20a_instobj_rd32,
+ .wr32 = gk20a_instobj_wr32,
+ },
+};
+
+static int
+gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
+{
+ struct gk20a_instmem_priv *priv = (void *)object;
+ priv->addr = ~0ULL;
+ return nvkm_instmem_fini(&priv->base, suspend);
+}
+
+static int
+gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gk20a_instmem_priv *priv;
+ struct nouveau_platform_device *plat;
+ int ret;
+
+ ret = nvkm_instmem_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&priv->lock);
+
+ plat = nv_device_to_platform(nv_device(parent));
+ if (plat->gpu->iommu.domain) {
+ priv->domain = plat->gpu->iommu.domain;
+ priv->mm = plat->gpu->iommu.mm;
+ priv->iommu_pgshift = plat->gpu->iommu.pgshift;
+ priv->mm_mutex = &plat->gpu->iommu.mutex;
+
+ nv_info(priv, "using IOMMU\n");
+ } else {
+ init_dma_attrs(&priv->attrs);
+ /*
+ * We will access instmem through PRAMIN and thus do not need a
+ * consistent CPU pointer or kernel mapping
+ */
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
+ dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+
+ nv_info(priv, "using DMA API\n");
+ }
+
+ return 0;
+}
+
+struct nvkm_oclass *
+gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0xea),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk20a_instmem_ctor,
+ .dtor = _nvkm_instmem_dtor,
+ .init = _nvkm_instmem_init,
+ .fini = gk20a_instmem_fini,
+ },
+ .instobj = &gk20a_instobj_oclass.base,
+}.base;
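
Since every CPU access to these objects goes through PRAMIN, the DMA path never needs a CPU-visible mapping at all. A minimal sketch of that allocation idiom, using the struct dma_attrs interface this code targets (later kernels replaced it with plain flag bits; dev and size are placeholders and error handling is elided):

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    /* Sketch only: allocate GPU-visible memory the CPU never maps. */
    static void *alloc_unmapped(struct device *dev, size_t size,
                                dma_addr_t *handle)
    {
            struct dma_attrs attrs;

            init_dma_attrs(&attrs);
            dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
            dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
            dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
            dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

            /* With NO_KERNEL_MAPPING the return value is an opaque cookie
             * to hand back to dma_free_attrs(), not a usable CPU pointer. */
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }
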
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 8e7cc6200d60..7fb5ea0314cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -136,7 +136,8 @@ gf100_ltc_dtor(struct nvkm_object *object)
struct nvkm_ltc_priv *priv = (void *)object;
nvkm_mm_fini(&priv->tags);
- nvkm_mm_free(&pfb->vram, &priv->tag_ram);
+ if (pfb->ram)
+ nvkm_mm_free(&pfb->vram, &priv->tag_ram);
nvkm_ltc_destroy(priv);
}
@@ -149,6 +150,12 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
u32 tag_size, tag_margin, tag_align;
int ret;
+ /* No VRAM, no tags for now. */
+ if (!pfb->ram) {
+ priv->num_tags = 0;
+ goto mm_init;
+ }
+
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
priv->num_tags = (pfb->ram->size >> 17) / 4;
if (priv->num_tags > (1 << 17))
@@ -183,6 +190,7 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
priv->tag_base = tag_base;
}
+mm_init:
ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 42cac13ca629..f20e4ca87e17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -182,7 +182,7 @@ mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
{
u64 desc = *(u64 *)data;
if ((desc & 0xf0) != 0xf0)
- nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+ nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 9a150d520225..7081d6a9b95f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -4,5 +4,6 @@ nvkm-y += nvkm/subdev/pmu/gt215.o
nvkm-y += nvkm/subdev/pmu/gf100.o
nvkm-y += nvkm/subdev/pmu/gf110.o
nvkm-y += nvkm/subdev/pmu/gk104.o
+nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
new file mode 100644
index 000000000000..89bb94b0af8b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf110_pmu_code gk110_pmu_code
+#define gf110_pmu_data gk110_pmu_data
+#include "priv.h"
+#include "fuc/gf110.fuc4.h"
+
+#include <subdev/timer.h>
+
+void
+gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
+{
+ static const struct {
+ u32 addr;
+ u32 data;
+ } magic[] = {
+ { 0x020520, 0xfffffffc },
+ { 0x020524, 0xfffffffe },
+ { 0x020524, 0xfffffffc },
+ { 0x020524, 0xfffffff8 },
+ { 0x020524, 0xffffffe0 },
+ { 0x020530, 0xfffffffe },
+ { 0x02052c, 0xfffffffa },
+ { 0x02052c, 0xfffffff0 },
+ { 0x02052c, 0xffffffc0 },
+ { 0x02052c, 0xffffff00 },
+ { 0x02052c, 0xfffffc00 },
+ { 0x02052c, 0xfffcfc00 },
+ { 0x02052c, 0xfff0fc00 },
+ { 0x02052c, 0xff80fc00 },
+ { 0x020528, 0xfffffffe },
+ { 0x020528, 0xfffffffc },
+ };
+ int i;
+
+ nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
+ nv_rd32(pmu, 0x000200);
+ nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+ msleep(50);
+
+ nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(magic); i++) {
+ nv_wr32(pmu, magic[i].addr, magic[i].data);
+ nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000);
+ }
+
+ nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
+ nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
+ nv_rd32(pmu, 0x000200);
+}
+
+struct nvkm_oclass *
+gk110_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0xf0),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = _nvkm_pmu_init,
+ .fini = _nvkm_pmu_fini,
+ },
+ .code.data = gk110_pmu_code,
+ .code.size = sizeof(gk110_pmu_code),
+ .data.data = gk110_pmu_data,
+ .data.size = sizeof(gk110_pmu_data),
+ .pgob = gk110_pmu_pgob,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index 6f9c09af1a49..b14134ef9ea5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -37,4 +37,5 @@ gk208_pmu_oclass = &(struct nvkm_pmu_impl) {
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
+ .pgob = gk110_pmu_pgob,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index a49934bbe637..594f746e68f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -159,7 +159,7 @@ resched:
nvkm_timer_alarm(priv, 100000000, alarm);
}
-int
+static int
gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_pmu *pmu = (void *)object;
@@ -170,7 +170,7 @@ gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
return nvkm_subdev_fini(&pmu->base, suspend);
}
-int
+static int
gk20a_pmu_init(struct nvkm_object *object)
{
struct nvkm_pmu *pmu = (void *)object;
@@ -192,7 +192,8 @@ gk20a_pmu_init(struct nvkm_object *object)
return ret;
}
-struct gk20a_pmu_dvfs_data gk20a_dvfs_data= {
+static struct gk20a_pmu_dvfs_data
+gk20a_dvfs_data = {
.p_load_target = 70,
.p_load_max = 90,
.p_smooth = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 998410563bfd..799e7c8b88f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -40,4 +40,6 @@ struct nvkm_pmu_impl {
void (*pgob)(struct nvkm_pmu *, bool);
};
+
+void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index a94b11f7859d..b41965c2888d 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -271,18 +271,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.best_encoder = omap_connector_attached_encoder,
};
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-void omap_connector_flush(struct drm_connector *connector,
- int x, int y, int w, int h)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- /* TODO: enable when supported in dss */
- VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
-}
-
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
int connector_type, struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index b0566a1ca28f..f456544bf300 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -28,7 +28,6 @@
struct omap_crtc {
struct drm_crtc base;
- struct drm_plane *plane;
const char *name;
int pipe;
@@ -46,7 +45,6 @@ struct omap_crtc {
struct omap_video_timings timings;
bool enabled;
- bool full_update;
struct omap_drm_apply apply;
@@ -74,8 +72,14 @@ struct omap_crtc {
* XXX maybe fold into apply_work??
*/
struct work_struct page_flip_work;
+
+ bool ignore_digit_sync_lost;
};
+/* -----------------------------------------------------------------------------
+ * Helper Functions
+ */
+
uint32_t pipe2vbl(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -83,6 +87,22 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->channel;
+}
+
+/* -----------------------------------------------------------------------------
+ * DSS Manager Functions
+ */
+
/*
* Manager-ops, callbacks from output when they need to configure
* the upstream part of the video pipe.
@@ -122,7 +142,63 @@ static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
{
}
-static void set_enabled(struct drm_crtc *crtc, bool enable);
+/* Called only from CRTC pre_apply and suspend/resume handlers. */
+static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ enum omap_channel channel = omap_crtc->channel;
+ struct omap_irq_wait *wait;
+ u32 framedone_irq, vsync_irq;
+ int ret;
+
+ if (dispc_mgr_is_enabled(channel) == enable)
+ return;
+
+ if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
+ /*
+ * Digit output produces some sync lost interrupts during the
+ * first frame when enabling, so we need to ignore those.
+ */
+ omap_crtc->ignore_digit_sync_lost = true;
+ }
+
+ framedone_irq = dispc_mgr_get_framedone_irq(channel);
+ vsync_irq = dispc_mgr_get_vsync_irq(channel);
+
+ if (enable) {
+ wait = omap_irq_wait_init(dev, vsync_irq, 1);
+ } else {
+ /*
+ * When we disable the digit output, we need to wait for
+ * FRAMEDONE to know that DISPC has finished with the output.
+ *
+ * OMAP2/3 does not have FRAMEDONE irq for digit output, and in
+ * that case we need to use vsync interrupt, and wait for both
+ * even and odd frames.
+ */
+
+ if (framedone_irq)
+ wait = omap_irq_wait_init(dev, framedone_irq, 1);
+ else
+ wait = omap_irq_wait_init(dev, vsync_irq, 2);
+ }
+
+ dispc_mgr_enable(channel, enable);
+
+ ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+ if (ret) {
+ dev_err(dev->dev, "%s: timeout waiting for %s\n",
+ omap_crtc->name, enable ? "enable" : "disable");
+ }
+
+ if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
+ omap_crtc->ignore_digit_sync_lost = false;
+ /* make sure the irq handler sees the value above */
+ mb();
+ }
+}
+
static int omap_crtc_enable(struct omap_overlay_manager *mgr)
{
@@ -131,7 +207,7 @@ static int omap_crtc_enable(struct omap_overlay_manager *mgr)
dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
dispc_mgr_set_timings(omap_crtc->channel,
&omap_crtc->timings);
- set_enabled(&omap_crtc->base, true);
+ omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
@@ -140,7 +216,7 @@ static void omap_crtc_disable(struct omap_overlay_manager *mgr)
{
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
- set_enabled(&omap_crtc->base, false);
+ omap_crtc_set_enabled(&omap_crtc->base, false);
}
static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
@@ -149,7 +225,6 @@ static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
DBG("%s", omap_crtc->name);
omap_crtc->timings = *timings;
- omap_crtc->full_update = true;
}
static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
@@ -174,19 +249,201 @@ static void omap_crtc_unregister_framedone_handler(
}
static const struct dss_mgr_ops mgr_ops = {
- .connect = omap_crtc_connect,
- .disconnect = omap_crtc_disconnect,
- .start_update = omap_crtc_start_update,
- .enable = omap_crtc_enable,
- .disable = omap_crtc_disable,
- .set_timings = omap_crtc_set_timings,
- .set_lcd_config = omap_crtc_set_lcd_config,
- .register_framedone_handler = omap_crtc_register_framedone_handler,
- .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
+ .connect = omap_crtc_connect,
+ .disconnect = omap_crtc_disconnect,
+ .start_update = omap_crtc_start_update,
+ .enable = omap_crtc_enable,
+ .disable = omap_crtc_disable,
+ .set_timings = omap_crtc_set_timings,
+ .set_lcd_config = omap_crtc_set_lcd_config,
+ .register_framedone_handler = omap_crtc_register_framedone_handler,
+ .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
};
-/*
- * CRTC funcs:
+/* -----------------------------------------------------------------------------
+ * Apply Logic
+ */
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, error_irq);
+
+ if (omap_crtc->ignore_digit_sync_lost) {
+ irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+ if (!irqstatus)
+ return;
+ }
+
+ DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+}
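
ignore_digit_sync_lost is written by the enable path and read from interrupt context, which is why omap_crtc_set_enabled() issues an mb() after clearing it. A rough userspace analogue of the pattern, with C11 atomics standing in for the kernel barrier (the constant and names here are invented for illustration):

    #include <stdatomic.h>
    #include <stdio.h>

    #define SYNC_LOST_DIGIT 0x40000000u /* stand-in for DISPC_IRQ_SYNC_LOST_DIGIT */

    static atomic_bool ignore_sync_lost;

    static void error_irq(unsigned int irqstatus)  /* "interrupt" context */
    {
            if (atomic_load_explicit(&ignore_sync_lost, memory_order_acquire)) {
                    irqstatus &= ~SYNC_LOST_DIGIT;
                    if (!irqstatus)
                            return;
            }
            printf("errors: %08x\n", irqstatus);
    }

    int main(void)
    {
            /* enabling: spurious sync-lost from the first frame is expected */
            atomic_store_explicit(&ignore_sync_lost, true, memory_order_release);
            error_irq(SYNC_LOST_DIGIT);   /* silently filtered */

            /* enable finished: report errors again */
            atomic_store_explicit(&ignore_sync_lost, false, memory_order_release);
            error_irq(SYNC_LOST_DIGIT);   /* printed */
            return 0;
    }
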
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, apply_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+
+ if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+ struct omap_drm_private *priv =
+ crtc->dev->dev_private;
+ DBG("%s: apply done", omap_crtc->name);
+ __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, apply_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct omap_drm_apply *apply, *n;
+ bool need_apply;
+
+ /*
+ * Synchronize everything on the CRTC lock (crtc->mutex), to keep
+ * the callbacks and list modification all serialized
+ * with respect to modesetting ioctls from userspace.
+ */
+ drm_modeset_lock(&crtc->mutex, NULL);
+ dispc_runtime_get();
+
+ /*
+ * If a previous update is still pending, wait: when it completes,
+ * we get kicked again.
+ */
+ if (omap_crtc->apply_irq.registered)
+ goto out;
+
+ /* finish up the previous applies: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->pending_applies, pending_node) {
+ apply->post_apply(apply);
+ list_del(&apply->pending_node);
+ }
+
+ need_apply = !list_empty(&omap_crtc->queued_applies);
+
+ /* then handle the next round of queued applies: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->queued_applies, queued_node) {
+ apply->pre_apply(apply);
+ list_del(&apply->queued_node);
+ apply->queued = false;
+ list_add_tail(&apply->pending_node,
+ &omap_crtc->pending_applies);
+ }
+
+ if (need_apply) {
+ enum omap_channel channel = omap_crtc->channel;
+
+ DBG("%s: GO", omap_crtc->name);
+
+ if (dispc_mgr_is_enabled(channel)) {
+ dispc_mgr_go(channel);
+ omap_irq_register(dev, &omap_crtc->apply_irq);
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+ }
+
+out:
+ dispc_runtime_put();
+ drm_modeset_unlock(&crtc->mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* no need to queue it again if it is already queued: */
+ if (apply->queued)
+ return 0;
+
+ apply->queued = true;
+ list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+ /*
+ * If there are no currently pending updates, then go ahead and
+ * kick the worker immediately, otherwise it will run again when
+ * the current update finishes.
+ */
+ if (list_empty(&omap_crtc->pending_applies)) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+
+ return 0;
+}
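
The queued/pending split implements a small double-buffered pipeline: applies accumulate on the queued list while a hardware GO for the previous batch is outstanding, and the worker promotes them only after post-applying that batch. Below is a condensed, single-threaded model of just the list handling (all names invented for illustration; the real code uses kernel lists and a workqueue):

    #include <stdbool.h>
    #include <stdio.h>

    struct apply {
            const char *name;
            struct apply *next;
            bool queued;
    };

    static struct apply *queued_head, *pending_head;

    static void crtc_apply(struct apply *a)
    {
            if (a->queued)
                    return;               /* already queued: nothing to do */
            a->queued = true;
            a->next = queued_head;
            queued_head = a;
    }

    static void apply_worker(void)
    {
            /* finish up the previous batch */
            for (struct apply *a = pending_head; a; a = a->next)
                    printf("post_apply(%s)\n", a->name);
            pending_head = NULL;

            /* promote the queued batch and issue the next GO */
            while (queued_head) {
                    struct apply *a = queued_head;
                    queued_head = a->next;
                    a->queued = false;
                    printf("pre_apply(%s)\n", a->name);
                    a->next = pending_head;
                    pending_head = a;
            }
            if (pending_head)
                    printf("GO\n");
    }

    int main(void)
    {
            struct apply a = { "crtc", NULL, false };
            struct apply b = { "plane", NULL, false };

            crtc_apply(&a);
            crtc_apply(&b);
            crtc_apply(&a);   /* duplicate: skipped, a.queued is set */
            apply_worker();   /* pre_apply x2, then GO */
            apply_worker();   /* post_apply x2 */
            return 0;
    }
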
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(apply, struct omap_crtc, apply);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ unsigned int i;
+
+ DBG("%s: enabled=%d", omap_crtc->name, omap_crtc->enabled);
+
+ for (i = 0; i < priv->num_encoders; i++) {
+ if (priv->encoders[i]->crtc == crtc) {
+ encoder = priv->encoders[i];
+ break;
+ }
+ }
+
+ if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
+ omap_encoder_set_enabled(omap_crtc->current_encoder, false);
+
+ omap_crtc->current_encoder = encoder;
+
+ if (!omap_crtc->enabled) {
+ if (encoder)
+ omap_encoder_set_enabled(encoder, false);
+ } else {
+ if (encoder) {
+ omap_encoder_set_enabled(encoder, false);
+ omap_encoder_update(encoder, omap_crtc->mgr,
+ &omap_crtc->timings);
+ omap_encoder_set_enabled(encoder, true);
+ }
+ }
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+ /* nothing needed for post-apply */
+}
+
+void omap_crtc_flush(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ int loops = 0;
+
+ while (!list_empty(&omap_crtc->pending_applies) ||
+ !list_empty(&omap_crtc->queued_applies) ||
+ omap_crtc->event || omap_crtc->old_fb) {
+
+ if (++loops > 10) {
+ dev_err(crtc->dev->dev,
+ "omap_crtc_flush() timeout\n");
+ break;
+ }
+
+ schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC Functions
*/
static void omap_crtc_destroy(struct drm_crtc *crtc)
@@ -214,17 +471,13 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
if (enabled != omap_crtc->enabled) {
omap_crtc->enabled = enabled;
- omap_crtc->full_update = true;
omap_crtc_apply(crtc, &omap_crtc->apply);
- /* also enable our private plane: */
- WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
-
- /* and any attached overlay planes: */
+ /* Enable/disable all planes associated with the CRTC. */
for (i = 0; i < priv->num_planes; i++) {
struct drm_plane *plane = priv->planes[i];
if (plane->crtc == crtc)
- WARN_ON(omap_plane_dpms(plane, mode));
+ WARN_ON(omap_plane_set_enable(plane, enabled));
}
}
}
@@ -256,13 +509,17 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
mode->type, mode->flags);
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
- omap_crtc->full_update = true;
- return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- NULL, NULL);
+ /*
+ * The primary plane CRTC can be reset if the plane is disabled directly
+ * through the universal plane API. Set it again here.
+ */
+ crtc->primary->crtc = crtc;
+
+ return omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x, y, mode->hdisplay, mode->vdisplay,
+ NULL, NULL);
}
static void omap_crtc_prepare(struct drm_crtc *crtc)
@@ -282,15 +539,13 @@ static void omap_crtc_commit(struct drm_crtc *crtc)
static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_plane *plane = omap_crtc->plane;
+ struct drm_plane *plane = crtc->primary;
struct drm_display_mode *mode = &crtc->mode;
return omap_plane_mode_set(plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- NULL, NULL);
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x, y, mode->hdisplay, mode->vdisplay,
+ NULL, NULL);
}
static void vblank_cb(void *arg)
@@ -299,6 +554,7 @@ static void vblank_cb(void *arg)
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
unsigned long flags;
+ struct drm_framebuffer *fb;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -306,10 +562,15 @@ static void vblank_cb(void *arg)
if (omap_crtc->event)
drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
+ fb = omap_crtc->old_fb;
+
omap_crtc->event = NULL;
omap_crtc->old_fb = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (fb)
+ drm_framebuffer_unreference(fb);
}
static void page_flip_worker(struct work_struct *work)
@@ -321,11 +582,10 @@ static void page_flip_worker(struct work_struct *work)
struct drm_gem_object *bo;
drm_modeset_lock(&crtc->mutex, NULL);
- omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- crtc->x << 16, crtc->y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- vblank_cb, crtc);
+ omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ crtc->x, crtc->y, mode->hdisplay, mode->vdisplay,
+ vblank_cb, crtc);
drm_modeset_unlock(&crtc->mutex);
bo = omap_framebuffer_bo(crtc->primary->fb, 0);
@@ -361,11 +621,12 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
if (omap_crtc->old_fb) {
spin_unlock_irqrestore(&dev->event_lock, flags);
dev_err(dev->dev, "already a pending flip\n");
- return -EINVAL;
+ return -EBUSY;
}
omap_crtc->event = event;
omap_crtc->old_fb = primary->fb = fb;
+ drm_framebuffer_reference(omap_crtc->old_fb);
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -385,7 +646,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
static int omap_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val)
{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_drm_private *priv = crtc->dev->dev_private;
if (property == priv->rotation_prop) {
@@ -393,7 +653,7 @@ static int omap_crtc_set_property(struct drm_crtc *crtc,
!!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
}
- return omap_plane_set_property(omap_crtc->plane, property, val);
+ return omap_plane_set_property(crtc->primary, property, val);
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
@@ -412,256 +672,15 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.mode_set_base = omap_crtc_mode_set_base,
};
-const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
-}
-
-enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return omap_crtc->channel;
-}
-
-static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, error_irq);
- struct drm_crtc *crtc = &omap_crtc->base;
- DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
- /* avoid getting in a flood, unregister the irq until next vblank */
- __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-}
-
-static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, apply_irq);
- struct drm_crtc *crtc = &omap_crtc->base;
-
- if (!omap_crtc->error_irq.registered)
- __omap_irq_register(crtc->dev, &omap_crtc->error_irq);
-
- if (!dispc_mgr_go_busy(omap_crtc->channel)) {
- struct omap_drm_private *priv =
- crtc->dev->dev_private;
- DBG("%s: apply done", omap_crtc->name);
- __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
-}
-
-static void apply_worker(struct work_struct *work)
-{
- struct omap_crtc *omap_crtc =
- container_of(work, struct omap_crtc, apply_work);
- struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct omap_drm_apply *apply, *n;
- bool need_apply;
-
- /*
- * Synchronize everything on mode_config.mutex, to keep
- * the callbacks and list modification all serialized
- * with respect to modesetting ioctls from userspace.
- */
- drm_modeset_lock(&crtc->mutex, NULL);
- dispc_runtime_get();
-
- /*
- * If we are still pending a previous update, wait.. when the
- * pending update completes, we get kicked again.
- */
- if (omap_crtc->apply_irq.registered)
- goto out;
-
- /* finish up previous apply's: */
- list_for_each_entry_safe(apply, n,
- &omap_crtc->pending_applies, pending_node) {
- apply->post_apply(apply);
- list_del(&apply->pending_node);
- }
-
- need_apply = !list_empty(&omap_crtc->queued_applies);
-
- /* then handle the next round of of queued apply's: */
- list_for_each_entry_safe(apply, n,
- &omap_crtc->queued_applies, queued_node) {
- apply->pre_apply(apply);
- list_del(&apply->queued_node);
- apply->queued = false;
- list_add_tail(&apply->pending_node,
- &omap_crtc->pending_applies);
- }
-
- if (need_apply) {
- enum omap_channel channel = omap_crtc->channel;
-
- DBG("%s: GO", omap_crtc->name);
-
- if (dispc_mgr_is_enabled(channel)) {
- omap_irq_register(dev, &omap_crtc->apply_irq);
- dispc_mgr_go(channel);
- } else {
- struct omap_drm_private *priv = dev->dev_private;
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
- }
-
-out:
- dispc_runtime_put();
- drm_modeset_unlock(&crtc->mutex);
-}
-
-int omap_crtc_apply(struct drm_crtc *crtc,
- struct omap_drm_apply *apply)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- /* no need to queue it again if it is already queued: */
- if (apply->queued)
- return 0;
-
- apply->queued = true;
- list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
-
- /*
- * If there are no currently pending updates, then go ahead and
- * kick the worker immediately, otherwise it will run again when
- * the current update finishes.
- */
- if (list_empty(&omap_crtc->pending_applies)) {
- struct omap_drm_private *priv = crtc->dev->dev_private;
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
-
- return 0;
-}
-
-/* called only from apply */
-static void set_enabled(struct drm_crtc *crtc, bool enable)
-{
- struct drm_device *dev = crtc->dev;
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- enum omap_channel channel = omap_crtc->channel;
- struct omap_irq_wait *wait;
- u32 framedone_irq, vsync_irq;
- int ret;
-
- if (dispc_mgr_is_enabled(channel) == enable)
- return;
-
- /*
- * Digit output produces some sync lost interrupts during the first
- * frame when enabling, so we need to ignore those.
- */
- omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-
- framedone_irq = dispc_mgr_get_framedone_irq(channel);
- vsync_irq = dispc_mgr_get_vsync_irq(channel);
-
- if (enable) {
- wait = omap_irq_wait_init(dev, vsync_irq, 1);
- } else {
- /*
- * When we disable the digit output, we need to wait for
- * FRAMEDONE to know that DISPC has finished with the output.
- *
- * OMAP2/3 does not have FRAMEDONE irq for digit output, and in
- * that case we need to use vsync interrupt, and wait for both
- * even and odd frames.
- */
-
- if (framedone_irq)
- wait = omap_irq_wait_init(dev, framedone_irq, 1);
- else
- wait = omap_irq_wait_init(dev, vsync_irq, 2);
- }
-
- dispc_mgr_enable(channel, enable);
-
- ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
- if (ret) {
- dev_err(dev->dev, "%s: timeout waiting for %s\n",
- omap_crtc->name, enable ? "enable" : "disable");
- }
-
- omap_irq_register(crtc->dev, &omap_crtc->error_irq);
-}
-
-static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
-{
- struct omap_crtc *omap_crtc =
- container_of(apply, struct omap_crtc, apply);
- struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_encoder *encoder = NULL;
-
- DBG("%s: enabled=%d, full=%d", omap_crtc->name,
- omap_crtc->enabled, omap_crtc->full_update);
-
- if (omap_crtc->full_update) {
- struct omap_drm_private *priv = crtc->dev->dev_private;
- int i;
- for (i = 0; i < priv->num_encoders; i++) {
- if (priv->encoders[i]->crtc == crtc) {
- encoder = priv->encoders[i];
- break;
- }
- }
- }
-
- if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
- omap_encoder_set_enabled(omap_crtc->current_encoder, false);
-
- omap_crtc->current_encoder = encoder;
-
- if (!omap_crtc->enabled) {
- if (encoder)
- omap_encoder_set_enabled(encoder, false);
- } else {
- if (encoder) {
- omap_encoder_set_enabled(encoder, false);
- omap_encoder_update(encoder, omap_crtc->mgr,
- &omap_crtc->timings);
- omap_encoder_set_enabled(encoder, true);
- }
- }
-
- omap_crtc->full_update = false;
-}
-
-static void omap_crtc_post_apply(struct omap_drm_apply *apply)
-{
- /* nothing needed for post-apply */
-}
-
-void omap_crtc_flush(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- int loops = 0;
-
- while (!list_empty(&omap_crtc->pending_applies) ||
- !list_empty(&omap_crtc->queued_applies) ||
- omap_crtc->event || omap_crtc->old_fb) {
-
- if (++loops > 10) {
- dev_err(crtc->dev->dev,
- "omap_crtc_flush() timeout\n");
- break;
- }
-
- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
- }
-}
+/* -----------------------------------------------------------------------------
+ * Init and Cleanup
+ */
static const char *channel_names[] = {
- [OMAP_DSS_CHANNEL_LCD] = "lcd",
- [OMAP_DSS_CHANNEL_DIGIT] = "tv",
- [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
- [OMAP_DSS_CHANNEL_LCD3] = "lcd3",
+ [OMAP_DSS_CHANNEL_LCD] = "lcd",
+ [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+ [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+ [OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
void omap_crtc_pre_init(void)
@@ -681,12 +700,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc = NULL;
struct omap_crtc *omap_crtc;
struct omap_overlay_manager_info *info;
+ int ret;
DBG("%s", channel_names[channel]);
omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
if (!omap_crtc)
- goto fail;
+ return NULL;
crtc = &omap_crtc->base;
@@ -700,8 +720,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->apply.post_apply = omap_crtc_post_apply;
omap_crtc->channel = channel;
- omap_crtc->plane = plane;
- omap_crtc->plane->crtc = crtc;
omap_crtc->name = channel_names[channel];
omap_crtc->pipe = id;
@@ -723,18 +741,18 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
info->trans_enabled = false;
- drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &omap_crtc_funcs);
+ if (ret < 0) {
+ kfree(omap_crtc);
+ return NULL;
+ }
+
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
- omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+ omap_plane_install_properties(crtc->primary, &crtc->base);
omap_crtcs[channel] = omap_crtc;
return crtc;
-
-fail:
- if (crtc)
- omap_crtc_destroy(crtc);
-
- return NULL;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58bcd6ae0255..9f32a83ca507 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -148,11 +148,15 @@ struct refill_engine {
bool async;
- wait_queue_head_t wait_for_refill;
+ struct completion compl;
struct list_head idle_node;
};
+struct dmm_platform_data {
+ uint32_t cpu_cache_flags;
+};
+
struct dmm {
struct device *dev;
void __iomem *base;
@@ -183,6 +187,8 @@ struct dmm {
/* allocation list and lock */
struct list_head alloc_head;
+
+ const struct dmm_platform_data *plat_data;
};
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 56c60552abba..042038e8a662 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/list.h>
+#include <linux/completion.h>
#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
@@ -39,6 +40,10 @@
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;
+#if defined(CONFIG_OF)
+static const struct of_device_id dmm_of_match[];
+#endif
+
/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);
@@ -58,19 +63,19 @@ static const struct {
uint32_t slot_w; /* width of each slot (in pixels) */
uint32_t slot_h; /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
- [TILFMT_8BIT] = GEOM(0, 0, 1),
- [TILFMT_16BIT] = GEOM(0, 1, 2),
- [TILFMT_32BIT] = GEOM(1, 1, 4),
- [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+ [TILFMT_8BIT] = GEOM(0, 0, 1),
+ [TILFMT_16BIT] = GEOM(0, 1, 2),
+ [TILFMT_32BIT] = GEOM(1, 1, 4),
+ [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
- [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
- DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
- [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
- DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+ [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+ DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+ [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+ DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
/* simple allocator to grab next 16 byte aligned memory from txn */
@@ -142,10 +147,10 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
for (i = 0; i < dmm->num_engines; i++) {
if (status & DMM_IRQSTAT_LST) {
- wake_up_interruptible(&dmm->engines[i].wait_for_refill);
-
if (dmm->engines[i].async)
release_engine(&dmm->engines[i]);
+
+ complete(&dmm->engines[i].compl);
}
status >>= 8;
@@ -269,15 +274,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
/* mark whether it is async to denote list management in IRQ handler */
engine->async = wait ? false : true;
+ reinit_completion(&engine->compl);
+ /* verify that the irq handler sees the 'async' and completion values */
+ smp_mb();
/* kick reload */
writel(engine->refill_pa,
dmm->base + reg[PAT_DESCR][engine->id]);
if (wait) {
- if (wait_event_interruptible_timeout(engine->wait_for_refill,
- wait_status(engine, DMM_PATSTATUS_READY) == 0,
- msecs_to_jiffies(1)) <= 0) {
+ if (!wait_for_completion_timeout(&engine->compl,
+ msecs_to_jiffies(1))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
}
@@ -529,6 +536,11 @@ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
+uint32_t tiler_get_cpu_cache_flags(void)
+{
+ return omap_dmm->plat_data->cpu_cache_flags;
+}
+
bool dmm_is_available(void)
{
return omap_dmm ? true : false;
@@ -592,6 +604,18 @@ static int omap_dmm_probe(struct platform_device *dev)
init_waitqueue_head(&omap_dmm->engine_queue);
+ if (dev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(dmm_of_match, dev->dev.of_node);
+ if (!match) {
+ dev_err(&dev->dev, "failed to find matching device node\n");
+ return -ENODEV;
+ }
+
+ omap_dmm->plat_data = match->data;
+ }
+
/* lookup hwmod data - base address and irq */
mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -696,7 +720,7 @@ static int omap_dmm_probe(struct platform_device *dev)
(REFILL_BUFFER_SIZE * i);
omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
(REFILL_BUFFER_SIZE * i);
- init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+ init_completion(&omap_dmm->engines[i].compl);
list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
}
@@ -941,7 +965,7 @@ error:
}
#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
struct tcm_area area;
@@ -965,16 +989,28 @@ static int omap_dmm_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops omap_dmm_pm_ops = {
- .resume = omap_dmm_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);
+
#if defined(CONFIG_OF)
+static const struct dmm_platform_data dmm_omap4_platform_data = {
+ .cpu_cache_flags = OMAP_BO_WC,
+};
+
+static const struct dmm_platform_data dmm_omap5_platform_data = {
+ .cpu_cache_flags = OMAP_BO_UNCACHED,
+};
+
static const struct of_device_id dmm_of_match[] = {
- { .compatible = "ti,omap4-dmm", },
- { .compatible = "ti,omap5-dmm", },
+ {
+ .compatible = "ti,omap4-dmm",
+ .data = &dmm_omap4_platform_data,
+ },
+ {
+ .compatible = "ti,omap5-dmm",
+ .data = &dmm_omap5_platform_data,
+ },
{},
};
#endif
@@ -986,9 +1022,7 @@ struct platform_driver omap_dmm_driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
.of_match_table = of_match_ptr(dmm_of_match),
-#ifdef CONFIG_PM
.pm = &omap_dmm_pm_ops,
-#endif
},
};
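
The switch from a wait queue to a struct completion above follows a common kernel pattern: the waiter re-arms the completion before kicking the hardware, a barrier makes the engine state visible to the interrupt handler, and complete() latches the event so it cannot be missed even if it fires before the waiter sleeps. A pthread-based analogue of that latching behaviour, purely illustrative (the helper names deliberately mirror the kernel API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            bool done;      /* the "latch": survives an early complete() */
    };

    /* only safe here because no other thread is running yet */
    static void reinit_completion(struct completion *c) { c->done = false; }

    static void complete(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = true;
            pthread_cond_signal(&c->cond);
            pthread_mutex_unlock(&c->lock);
    }

    static bool wait_for_completion_timeout(struct completion *c, int ms)
    {
            struct timespec ts;
            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += ms / 1000;
            ts.tv_nsec += (ms % 1000) * 1000000L;
            if (ts.tv_nsec >= 1000000000L) { ts.tv_sec++; ts.tv_nsec -= 1000000000L; }

            pthread_mutex_lock(&c->lock);
            while (!c->done &&
                   pthread_cond_timedwait(&c->cond, &c->lock, &ts) == 0)
                    ;       /* spurious wakeup: re-check the latch */
            bool ok = c->done;
            pthread_mutex_unlock(&c->lock);
            return ok;
    }

    static struct completion refill_done = { PTHREAD_MUTEX_INITIALIZER,
                                             PTHREAD_COND_INITIALIZER, false };

    static void *irq(void *arg)     /* stands in for omap_dmm_irq_handler() */
    {
            (void)arg;
            complete(&refill_done);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            reinit_completion(&refill_done); /* re-arm *before* kicking the engine */
            pthread_create(&t, NULL, irq, NULL);
            printf(wait_for_completion_timeout(&refill_done, 100) ?
                   "refill done\n" : "timed out waiting for done\n");
            pthread_join(&t, NULL);
            return 0;
    }
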
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 4fdd61e54bd2..e83c78372db8 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -106,6 +106,7 @@ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+uint32_t tiler_get_cpu_cache_flags(void);
bool dmm_is_available(void);
extern struct platform_driver omap_dmm_driver;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 8241ed9b353c..94920d47e3b6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -128,6 +128,29 @@ cleanup:
return r;
}
+static int omap_modeset_create_crtc(struct drm_device *dev, int id,
+ enum omap_channel channel)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+
+ crtc = omap_crtc_init(dev, plane, channel, id);
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
+
+ priv->planes[id] = plane;
+ priv->num_planes++;
+
+ return 0;
+}
+
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
@@ -136,6 +159,7 @@ static int omap_modeset_init(struct drm_device *dev)
int num_mgrs = dss_feat_get_num_mgrs();
int num_crtcs;
int i, id = 0;
+ int ret;
drm_mode_config_init(dev);
@@ -209,18 +233,13 @@ static int omap_modeset_init(struct drm_device *dev)
* allocated crtc, we create a new crtc for it
*/
if (!channel_used(dev, channel)) {
- struct drm_plane *plane;
- struct drm_crtc *crtc;
-
- plane = omap_plane_init(dev, id, true);
- crtc = omap_crtc_init(dev, plane, channel, id);
-
- BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
- priv->crtcs[id] = crtc;
- priv->num_crtcs++;
-
- priv->planes[id] = plane;
- priv->num_planes++;
+ ret = omap_modeset_create_crtc(dev, id, channel);
+ if (ret < 0) {
+ dev_err(dev->dev,
+ "could not create CRTC (channel %u)\n",
+ channel);
+ return ret;
+ }
id++;
}
@@ -234,26 +253,8 @@ static int omap_modeset_init(struct drm_device *dev)
/* find a free manager for this crtc */
for (i = 0; i < num_mgrs; i++) {
- if (!channel_used(dev, i)) {
- struct drm_plane *plane;
- struct drm_crtc *crtc;
-
- plane = omap_plane_init(dev, id, true);
- crtc = omap_crtc_init(dev, plane, i, id);
-
- BUG_ON(priv->num_crtcs >=
- ARRAY_SIZE(priv->crtcs));
-
- priv->crtcs[id] = crtc;
- priv->num_crtcs++;
-
- priv->planes[id] = plane;
- priv->num_planes++;
-
+ if (!channel_used(dev, i))
break;
- } else {
- continue;
- }
}
if (i == num_mgrs) {
@@ -261,13 +262,24 @@ static int omap_modeset_init(struct drm_device *dev)
dev_err(dev->dev, "no managers left for crtc\n");
return -ENOMEM;
}
+
+ ret = omap_modeset_create_crtc(dev, id, i);
+ if (ret < 0) {
+ dev_err(dev->dev,
+ "could not create CRTC (channel %u)\n", i);
+ return ret;
+ }
}
/*
* Create normal planes for the remaining overlays:
*/
for (; id < num_ovls; id++) {
- struct drm_plane *plane = omap_plane_init(dev, id, false);
+ struct drm_plane *plane;
+
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
priv->planes[priv->num_planes++] = plane;
@@ -286,14 +298,13 @@ static int omap_modeset_init(struct drm_device *dev)
for (id = 0; id < priv->num_crtcs; id++) {
struct drm_crtc *crtc = priv->crtcs[id];
enum omap_channel crtc_channel;
- enum omap_dss_output_id supported_outputs;
crtc_channel = omap_crtc_channel(crtc);
- supported_outputs =
- dss_feat_get_supported_outputs(crtc_channel);
- if (supported_outputs & output->id)
+ if (output->dispc_channel == crtc_channel) {
encoder->possible_crtcs |= (1 << id);
+ break;
+ }
}
omap_dss_put_device(output);
@@ -480,6 +491,7 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+ spin_lock_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
omap_gem_init(dev);
@@ -519,7 +531,8 @@ static int dev_unload(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
- omap_fbdev_free(dev);
+ if (priv->fbdev)
+ omap_fbdev_free(dev);
/* flush crtcs so the fbs get released */
for (i = 0; i < priv->num_crtcs; i++)
@@ -588,9 +601,11 @@ static void dev_lastclose(struct drm_device *dev)
}
}
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
- if (ret)
- DBG("failed to restore crtc mode");
+ if (priv->fbdev) {
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+ if (ret)
+ DBG("failed to restore crtc mode");
+ }
}
static void dev_preclose(struct drm_device *dev, struct drm_file *file)
@@ -610,74 +625,57 @@ static const struct vm_operations_struct omap_gem_vm_ops = {
};
static const struct file_operations omapdriver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .mmap = omap_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = noop_llseek,
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = omap_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
};
static struct drm_driver omap_drm_driver = {
- .driver_features =
- DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
- .load = dev_load,
- .unload = dev_unload,
- .open = dev_open,
- .lastclose = dev_lastclose,
- .preclose = dev_preclose,
- .postclose = dev_postclose,
- .set_busid = drm_platform_set_busid,
- .get_vblank_counter = drm_vblank_count,
- .enable_vblank = omap_irq_enable_vblank,
- .disable_vblank = omap_irq_disable_vblank,
- .irq_preinstall = omap_irq_preinstall,
- .irq_postinstall = omap_irq_postinstall,
- .irq_uninstall = omap_irq_uninstall,
- .irq_handler = omap_irq_handler,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM
+ | DRIVER_PRIME,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .set_busid = drm_platform_set_busid,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = omap_irq_enable_vblank,
+ .disable_vblank = omap_irq_disable_vblank,
+ .irq_preinstall = omap_irq_preinstall,
+ .irq_postinstall = omap_irq_postinstall,
+ .irq_uninstall = omap_irq_uninstall,
+ .irq_handler = omap_irq_handler,
#ifdef CONFIG_DEBUG_FS
- .debugfs_init = omap_debugfs_init,
- .debugfs_cleanup = omap_debugfs_cleanup,
+ .debugfs_init = omap_debugfs_init,
+ .debugfs_cleanup = omap_debugfs_cleanup,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = omap_gem_prime_export,
- .gem_prime_import = omap_gem_prime_import,
- .gem_free_object = omap_gem_free_object,
- .gem_vm_ops = &omap_gem_vm_ops,
- .dumb_create = omap_gem_dumb_create,
- .dumb_map_offset = omap_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
- .ioctls = ioctls,
- .num_ioctls = DRM_OMAP_NUM_IOCTLS,
- .fops = &omapdriver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = omap_gem_prime_export,
+ .gem_prime_import = omap_gem_prime_import,
+ .gem_free_object = omap_gem_free_object,
+ .gem_vm_ops = &omap_gem_vm_ops,
+ .dumb_create = omap_gem_dumb_create,
+ .dumb_map_offset = omap_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .ioctls = ioctls,
+ .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+ .fops = &omapdriver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
-static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
-{
- DBG("");
- return 0;
-}
-
-static int pdev_resume(struct platform_device *device)
-{
- DBG("");
- return 0;
-}
-
-static void pdev_shutdown(struct platform_device *device)
-{
- DBG("");
-}
-
static int pdev_probe(struct platform_device *device)
{
int r;
@@ -709,24 +707,35 @@ static int pdev_remove(struct platform_device *device)
return 0;
}
-#ifdef CONFIG_PM
-static const struct dev_pm_ops omapdrm_pm_ops = {
- .resume = omap_gem_resume,
-};
+#ifdef CONFIG_PM_SLEEP
+static int omap_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_disable(drm_dev);
+
+ return 0;
+}
+
+static int omap_drm_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_enable(drm_dev);
+
+ return omap_gem_resume(dev);
+}
#endif
+static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);
+
static struct platform_driver pdev = {
- .driver = {
- .name = DRIVER_NAME,
-#ifdef CONFIG_PM
- .pm = &omapdrm_pm_ops,
-#endif
- },
- .probe = pdev_probe,
- .remove = pdev_remove,
- .suspend = pdev_suspend,
- .resume = pdev_resume,
- .shutdown = pdev_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &omapdrm_pm_ops,
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
};
static int __init omap_drm_init(void)
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 60e47b33c801..b31c79f15aed 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -105,6 +105,9 @@ struct omap_drm_private {
struct workqueue_struct *wq;
+ /* lock for obj_list below */
+ spinlock_t list_lock;
+
/* list of GEM objects: */
struct list_head obj_list;
@@ -160,15 +163,15 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
void omap_crtc_flush(struct drm_crtc *crtc);
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int plane_id, bool private_plane);
-int omap_plane_dpms(struct drm_plane *plane, int mode);
+ int id, enum drm_plane_type type);
+int omap_plane_set_enable(struct drm_plane *plane, bool enable);
int omap_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- void (*fxn)(void *), void *arg);
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h,
+ void (*fxn)(void *), void *arg);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
int omap_plane_set_property(struct drm_plane *plane,
@@ -186,8 +189,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
-void omap_connector_flush(struct drm_connector *connector,
- int x, int y, int w, int h);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
@@ -208,8 +209,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from);
-void omap_framebuffer_flush(struct drm_framebuffer *fb,
- int x, int y, int w, int h);
void omap_gem_init(struct drm_device *dev);
void omap_gem_deinit(struct drm_device *dev);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 2a5cacdc344b..b2c1a29cc12b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -86,6 +86,7 @@ struct plane {
struct omap_framebuffer {
struct drm_framebuffer base;
+ int pin_count;
const struct format *format;
struct plane planes[4];
};
@@ -121,18 +122,6 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned flags, unsigned color,
struct drm_clip_rect *clips, unsigned num_clips)
{
- int i;
-
- drm_modeset_lock_all(fb->dev);
-
- for (i = 0; i < num_clips; i++) {
- omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
- clips[i].x2 - clips[i].x1,
- clips[i].y2 - clips[i].y1);
- }
-
- drm_modeset_unlock_all(fb->dev);
-
return 0;
}
@@ -261,6 +250,11 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ if (omap_fb->pin_count > 0) {
+ omap_fb->pin_count++;
+ return 0;
+ }
+
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
@@ -269,6 +263,8 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
}
+ omap_fb->pin_count++;
+
return 0;
fail:
@@ -287,6 +283,11 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ omap_fb->pin_count--;
+
+ if (omap_fb->pin_count > 0)
+ return 0;
+
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_put_paddr(plane->bo);
@@ -336,34 +337,6 @@ struct drm_connector *omap_framebuffer_get_next_connector(
return NULL;
}
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-void omap_framebuffer_flush(struct drm_framebuffer *fb,
- int x, int y, int w, int h)
-{
- struct drm_connector *connector = NULL;
-
- VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
-
- /* FIXME: This is racy - no protection against modeset config changes. */
- while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
- /* only consider connectors that are part of a chain */
- if (connector->encoder && connector->encoder->crtc) {
- /* TODO: maybe this should propagate thru the crtc who
- * could do the coordinate translation..
- */
- struct drm_crtc *crtc = connector->encoder->crtc;
- int cx = max(0, x - crtc->x);
- int cy = max(0, y - crtc->y);
- int cw = w + (x - crtc->x) - cx;
- int ch = h + (y - crtc->y) - cy;
-
- omap_connector_flush(connector, cx, cy, cw, ch);
- }
- }
-}
-
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
@@ -407,7 +380,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- struct omap_framebuffer *omap_fb;
+ struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
const struct format *format = NULL;
int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
@@ -450,6 +423,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
+ if (pitch % format->planes[i].stride_bpp != 0) {
+ dev_err(dev->dev,
+ "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n",
+ pitch, format->planes[i].stride_bpp);
+ ret = -EINVAL;
+ goto fail;
+ }
+
size = pitch * mode_cmd->height / format->planes[i].sub_y;
if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
@@ -478,8 +459,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
return fb;
fail:
- if (fb)
- omap_framebuffer_destroy(fb);
+ kfree(omap_fb);
return ERR_PTR(ret);
}
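The omap_fb.c changes above turn omap_framebuffer_pin()/unpin() into a refcounted pair: only the first pin maps the planes and only the last unpin releases them. The idiom, reduced to a sketch with hypothetical names (and, like the driver code, relying on the caller to serialize access):

/* Sketch only; the "demo_buf" names are hypothetical, not part of the patch. */
struct demo_buf {
	int pin_count;
	/* ... backing storage ... */
};

static int demo_buf_pin(struct demo_buf *buf)
{
	if (buf->pin_count++ > 0)
		return 0;	/* already mapped, just count the new user */

	/* first user: do the expensive mapping work here */
	return 0;
}

static void demo_buf_unpin(struct demo_buf *buf)
{
	if (--buf->pin_count > 0)
		return;		/* other users remain */

	/* last user gone: tear the mapping down here */
}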
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index d292d24b3a6e..950cd3389092 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -42,42 +42,8 @@ struct omap_fbdev {
struct work_struct work;
};
-static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
static struct drm_fb_helper *get_fb(struct fb_info *fbi);
-static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t res;
-
- res = fb_sys_write(fbi, buf, count, ppos);
- omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
-
- return res;
-}
-
-static void omap_fbdev_fillrect(struct fb_info *fbi,
- const struct fb_fillrect *rect)
-{
- sys_fillrect(fbi, rect);
- omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void omap_fbdev_copyarea(struct fb_info *fbi,
- const struct fb_copyarea *area)
-{
- sys_copyarea(fbi, area);
- omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
-}
-
-static void omap_fbdev_imageblit(struct fb_info *fbi,
- const struct fb_image *image)
-{
- sys_imageblit(fbi, image);
- omap_fbdev_flush(fbi, image->dx, image->dy,
- image->width, image->height);
-}
-
static void pan_worker(struct work_struct *work)
{
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
@@ -121,10 +87,10 @@ static struct fb_ops omap_fb_ops = {
* basic fbdev ops which write to the framebuffer
*/
.fb_read = fb_sys_read,
- .fb_write = omap_fbdev_write,
- .fb_fillrect = omap_fbdev_fillrect,
- .fb_copyarea = omap_fbdev_copyarea,
- .fb_imageblit = omap_fbdev_imageblit,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -294,21 +260,6 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
return fbi->par;
}
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
-{
- struct drm_fb_helper *helper = get_fb(fbi);
-
- if (!helper)
- return;
-
- VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
-
- omap_framebuffer_flush(helper->fb, x, y, w, h);
-}
-
/* initialize fbdev helper */
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index aeb91ed653c9..e9718b99a8a9 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -828,6 +828,7 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
dev_err(obj->dev->dev,
"could not release unmap: %d\n", ret);
}
+ omap_obj->paddr = 0;
omap_obj->block = NULL;
}
}
@@ -1272,13 +1273,16 @@ unlock:
void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
evict(obj);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ spin_lock(&priv->list_lock);
list_del(&omap_obj->mm_list);
+ spin_unlock(&priv->list_lock);
drm_gem_free_mmap_offset(obj);
@@ -1358,8 +1362,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
/* currently don't allow cached buffers.. there is some caching
* stuff that needs to be handled better
*/
- flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
- flags |= OMAP_BO_WC;
+ flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
+ flags |= tiler_get_cpu_cache_flags();
/* align dimensions to slot boundaries... */
tiler_align(gem2fmt(flags),
@@ -1376,7 +1380,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
if (!omap_obj)
goto fail;
+ spin_lock(&priv->list_lock);
list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
obj = &omap_obj->base;
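The new priv->list_lock above closes the race between omap_gem_new() adding objects to obj_list and omap_gem_free_object() removing them. The locked-list idiom in isolation (the "tracker" names are hypothetical):

/* Sketch only; not part of the patch. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct tracker {
	spinlock_t lock;	/* protects list */
	struct list_head list;
};

static void tracker_add(struct tracker *t, struct list_head *item)
{
	spin_lock(&t->lock);
	list_add(item, &t->list);
	spin_unlock(&t->lock);
}

static void tracker_del(struct tracker *t, struct list_head *item)
{
	spin_lock(&t->lock);
	list_del(item);
	spin_unlock(&t->lock);
}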
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index a2dbfb1737b4..b46dabd9faf7 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -156,16 +156,16 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
}
static struct dma_buf_ops omap_dmabuf_ops = {
- .map_dma_buf = omap_gem_map_dma_buf,
- .unmap_dma_buf = omap_gem_unmap_dma_buf,
- .release = omap_gem_dmabuf_release,
- .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
- .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
- .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
- .kmap = omap_gem_dmabuf_kmap,
- .kunmap = omap_gem_dmabuf_kunmap,
- .mmap = omap_gem_dmabuf_mmap,
+ .map_dma_buf = omap_gem_map_dma_buf,
+ .unmap_dma_buf = omap_gem_unmap_dma_buf,
+ .release = omap_gem_dmabuf_release,
+ .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
+ .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
+ .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
+ .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+ .kmap = omap_gem_dmabuf_kmap,
+ .kunmap = omap_gem_dmabuf_kunmap,
+ .mmap = omap_gem_dmabuf_mmap,
};
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index f035d2bceae7..3eb097efc488 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -34,7 +34,7 @@ static void omap_irq_update(struct drm_device *dev)
struct omap_drm_irq *irq;
uint32_t irqmask = priv->vblank_mask;
- BUG_ON(!spin_is_locked(&list_lock));
+ assert_spin_locked(&list_lock);
list_for_each_entry(irq, &priv->irq_list, node)
irqmask |= irq->irqmask;
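The omap_irq.c one-liner matters more than it looks: on !CONFIG_SMP builds spin_is_locked() evaluates to 0 even while the lock is held, so BUG_ON(!spin_is_locked(...)) fires spuriously, whereas assert_spin_locked() is defined to behave correctly in every configuration. A sketch of the corrected assertion (the "demo" names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_needs_lock_held(void)
{
	/*
	 * Not BUG_ON(!spin_is_locked(&demo_lock)): that misfires on
	 * uniprocessor kernels, where spin_is_locked() is always 0.
	 */
	assert_spin_locked(&demo_lock);

	/* ... work that requires demo_lock ... */
}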
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index ee8e2b3a117e..1c6b63f39474 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -65,12 +65,16 @@ struct omap_plane {
struct callback apply_done_cb;
};
-static void unpin_worker(struct drm_flip_work *work, void *val)
+static void omap_plane_unpin_worker(struct drm_flip_work *work, void *val)
{
struct omap_plane *omap_plane =
container_of(work, struct omap_plane, unpin_work);
struct drm_device *dev = omap_plane->base.dev;
+ /*
+ * omap_framebuffer_pin/unpin are always called from priv->wq,
+ * so there's no need for locking here.
+ */
omap_framebuffer_unpin(val);
mutex_lock(&dev->mode_config.mutex);
drm_framebuffer_unreference(val);
@@ -78,7 +82,8 @@ static void unpin_worker(struct drm_flip_work *work, void *val)
}
/* update which fb (if any) is pinned for scanout */
-static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
+static int omap_plane_update_pin(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
@@ -121,13 +126,12 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
struct drm_crtc *crtc = plane->crtc;
enum omap_channel channel;
bool enabled = omap_plane->enabled && crtc;
- bool ilace, replication;
int ret;
DBG("%s, enabled=%d", omap_plane->name, enabled);
/* if fb has changed, pin new fb: */
- update_pin(plane, enabled ? plane->fb : NULL);
+ omap_plane_update_pin(plane, enabled ? plane->fb : NULL);
if (!enabled) {
dispc_ovl_enable(omap_plane->id, false);
@@ -145,20 +149,17 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
&info->paddr, &info->p_uv_addr);
- /* TODO: */
- ilace = false;
- replication = false;
+ dispc_ovl_set_channel_out(omap_plane->id, channel);
/* and finally, update omapdss: */
- ret = dispc_ovl_setup(omap_plane->id, info,
- replication, omap_crtc_timings(crtc), false);
+ ret = dispc_ovl_setup(omap_plane->id, info, false,
+ omap_crtc_timings(crtc), false);
if (ret) {
dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
return;
}
dispc_ovl_enable(omap_plane->id, true);
- dispc_ovl_set_channel_out(omap_plane->id, channel);
}
static void omap_plane_post_apply(struct omap_drm_apply *apply)
@@ -167,7 +168,6 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
container_of(apply, struct omap_plane, apply);
struct drm_plane *plane = &omap_plane->base;
struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_overlay_info *info = &omap_plane->info;
struct callback cb;
cb = omap_plane->apply_done_cb;
@@ -177,14 +177,9 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
if (cb.fxn)
cb.fxn(cb.arg);
-
- if (omap_plane->enabled) {
- omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
- info->out_width, info->out_height);
- }
}
-static int apply(struct drm_plane *plane)
+static int omap_plane_apply(struct drm_plane *plane)
{
if (plane->crtc) {
struct omap_plane *omap_plane = to_omap_plane(plane);
@@ -194,12 +189,12 @@ static int apply(struct drm_plane *plane)
}
int omap_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- void (*fxn)(void *), void *arg)
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h,
+ void (*fxn)(void *), void *arg)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_window *win = &omap_plane->win;
@@ -209,11 +204,10 @@ int omap_plane_mode_set(struct drm_plane *plane,
win->crtc_w = crtc_w;
win->crtc_h = crtc_h;
- /* src values are in Q16 fixed point, convert to integer: */
- win->src_x = src_x >> 16;
- win->src_y = src_y >> 16;
- win->src_w = src_w >> 16;
- win->src_h = src_h >> 16;
+ win->src_x = src_x;
+ win->src_y = src_y;
+ win->src_w = src_w;
+ win->src_h = src_h;
if (fxn) {
/* omap_crtc should ensure that a new page flip
@@ -225,15 +219,7 @@ int omap_plane_mode_set(struct drm_plane *plane,
omap_plane->apply_done_cb.arg = arg;
}
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
- plane->fb = fb;
- plane->crtc = crtc;
-
- return apply(plane);
+ return omap_plane_apply(plane);
}
static int omap_plane_update(struct drm_plane *plane,
@@ -254,17 +240,29 @@ static int omap_plane_update(struct drm_plane *plane,
break;
}
+ /*
+ * We don't need to take a reference to the framebuffer as the DRM core
+ * has already done so for the purpose of setting plane->fb.
+ */
+ plane->fb = fb;
+ plane->crtc = crtc;
+
+ /* src values are in Q16 fixed point, convert to integer: */
return omap_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h,
+ src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
NULL, NULL);
}
static int omap_plane_disable(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
+
omap_plane->win.rotation = BIT(DRM_ROTATE_0);
- return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ omap_plane->info.zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
+ ? 0 : omap_plane->id;
+
+ return omap_plane_set_enable(plane, false);
}
static void omap_plane_destroy(struct drm_plane *plane)
@@ -275,7 +273,6 @@ static void omap_plane_destroy(struct drm_plane *plane)
omap_irq_unregister(plane->dev, &omap_plane->error_irq);
- omap_plane_disable(plane);
drm_plane_cleanup(plane);
drm_flip_work_cleanup(&omap_plane->unpin_work);
@@ -283,18 +280,15 @@ static void omap_plane_destroy(struct drm_plane *plane)
kfree(omap_plane);
}
-int omap_plane_dpms(struct drm_plane *plane, int mode)
+int omap_plane_set_enable(struct drm_plane *plane, bool enable)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- bool enabled = (mode == DRM_MODE_DPMS_ON);
- int ret = 0;
- if (enabled != omap_plane->enabled) {
- omap_plane->enabled = enabled;
- ret = apply(plane);
- }
+ if (enable == omap_plane->enabled)
+ return 0;
- return ret;
+ omap_plane->enabled = enable;
+ return omap_plane_apply(plane);
}
/* helper to install properties which are common to planes and crtcs */
@@ -342,61 +336,63 @@ int omap_plane_set_property(struct drm_plane *plane,
if (property == priv->rotation_prop) {
DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
omap_plane->win.rotation = val;
- ret = apply(plane);
+ ret = omap_plane_apply(plane);
} else if (property == priv->zorder_prop) {
DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
omap_plane->info.zorder = val;
- ret = apply(plane);
+ ret = omap_plane_apply(plane);
}
return ret;
}
static const struct drm_plane_funcs omap_plane_funcs = {
- .update_plane = omap_plane_update,
- .disable_plane = omap_plane_disable,
- .destroy = omap_plane_destroy,
- .set_property = omap_plane_set_property,
+ .update_plane = omap_plane_update,
+ .disable_plane = omap_plane_disable,
+ .destroy = omap_plane_destroy,
+ .set_property = omap_plane_set_property,
};
static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
{
struct omap_plane *omap_plane =
container_of(irq, struct omap_plane, error_irq);
- DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+ DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name,
+ irqstatus);
}
static const char *plane_names[] = {
- [OMAP_DSS_GFX] = "gfx",
- [OMAP_DSS_VIDEO1] = "vid1",
- [OMAP_DSS_VIDEO2] = "vid2",
- [OMAP_DSS_VIDEO3] = "vid3",
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
};
static const uint32_t error_irqs[] = {
- [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int id, bool private_plane)
+ int id, enum drm_plane_type type)
{
struct omap_drm_private *priv = dev->dev_private;
- struct drm_plane *plane = NULL;
+ struct drm_plane *plane;
struct omap_plane *omap_plane;
struct omap_overlay_info *info;
+ int ret;
- DBG("%s: priv=%d", plane_names[id], private_plane);
+ DBG("%s: type=%d", plane_names[id], type);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
- return NULL;
+ return ERR_PTR(-ENOMEM);
drm_flip_work_init(&omap_plane->unpin_work,
- "unpin", unpin_worker);
+ "unpin", omap_plane_unpin_worker);
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
@@ -413,8 +409,11 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane->error_irq.irq = omap_plane_error_irq;
omap_irq_register(dev, &omap_plane->error_irq);
- drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
- omap_plane->formats, omap_plane->nformats, private_plane);
+ ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
+ &omap_plane_funcs, omap_plane->formats,
+ omap_plane->nformats, type);
+ if (ret < 0)
+ goto error;
omap_plane_install_properties(plane, &plane->base);
@@ -432,10 +431,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
* TODO add ioctl to give userspace an API to change this.. this
* will come in a subsequent patch.
*/
- if (private_plane)
+ if (type == DRM_PLANE_TYPE_PRIMARY)
omap_plane->info.zorder = 0;
else
omap_plane->info.zorder = id;
return plane;
+
+error:
+ omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+ kfree(omap_plane);
+	return ERR_PTR(ret);
}
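With omap_plane_init() now taking an explicit enum drm_plane_type and reporting failure via ERR_PTR, a caller can distinguish the primary plane from overlays and propagate errors directly. A hypothetical caller sketch (demo_create_planes is not in the patch):

static int demo_create_planes(struct drm_device *dev, int num_planes)
{
	int i;

	for (i = 0; i < num_planes; i++) {
		struct drm_plane *plane;

		/* plane 0 becomes the CRTC's primary plane */
		plane = omap_plane_init(dev, i, i == 0 ?
				DRM_PLANE_TYPE_PRIMARY :
				DRM_PLANE_TYPE_OVERLAY);
		if (IS_ERR(plane))
			return PTR_ERR(plane);
	}

	return 0;
}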
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d84583776d50..6d64c7bb908b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -11,6 +11,7 @@ config DRM_PANEL_SIMPLE
tristate "support for simple panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
help
DRM panel driver for dumb panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 39806c335339..30904a9b2a4c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -33,9 +33,14 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
+ const struct display_timing *timings;
+ unsigned int num_timings;
unsigned int bpc;
@@ -94,6 +99,25 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
if (!panel->desc)
return 0;
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(drm);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
@@ -226,12 +250,30 @@ static int panel_simple_get_modes(struct drm_panel *panel)
return num;
}
+static int panel_simple_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
.unprepare = panel_simple_unprepare,
.prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
+ .get_timings = panel_simple_get_timings,
};
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
@@ -327,6 +369,31 @@ static void panel_simple_shutdown(struct device *dev)
panel_simple_disable(&panel->base);
}
+static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 0,
+ .hsync_end = 800 + 0 + 255,
+ .htotal = 800 + 0 + 255 + 0,
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 45,
+ .vtotal = 480 + 2 + 45 + 0,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ampire_am800480r3tmqwa1h = {
+ .modes = &ampire_am800480r3tmqwa1h_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode auo_b101aw03_mode = {
.clock = 51450,
.hdisplay = 1024,
@@ -350,6 +417,29 @@ static const struct panel_desc auo_b101aw03 = {
},
};
+static const struct drm_display_mode auo_b101ean01_mode = {
+ .clock = 72500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 119,
+ .hsync_end = 1280 + 119 + 32,
+ .htotal = 1280 + 119 + 32 + 21,
+ .vdisplay = 800,
+ .vsync_start = 800 + 4,
+ .vsync_end = 800 + 4 + 20,
+ .vtotal = 800 + 4 + 20 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101ean01 = {
+ .modes = &auo_b101ean01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+};
+
static const struct drm_display_mode auo_b101xtn01_mode = {
.clock = 72000,
.hdisplay = 1366,
@@ -615,24 +705,25 @@ static const struct panel_desc giantplus_gpg482739qs5 = {
.width = 95,
.height = 54,
},
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
-static const struct drm_display_mode hannstar_hsd070pww1_mode = {
- .clock = 71100,
- .hdisplay = 1280,
- .hsync_start = 1280 + 1,
- .hsync_end = 1280 + 1 + 158,
- .htotal = 1280 + 1 + 158 + 1,
- .vdisplay = 800,
- .vsync_start = 800 + 1,
- .vsync_end = 800 + 1 + 21,
- .vtotal = 800 + 1 + 21 + 1,
- .vrefresh = 60,
+static const struct display_timing hannstar_hsd070pww1_timing = {
+ .pixelclock = { 64300000, 71100000, 82000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 1, 1, 10 },
+ .hback_porch = { 1, 1, 10 },
+ .hsync_len = { 52, 158, 661 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 1, 10 },
+ .vback_porch = { 1, 1, 10 },
+ .vsync_len = { 1, 21, 203 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc hannstar_hsd070pww1 = {
- .modes = &hannstar_hsd070pww1_mode,
- .num_modes = 1,
+ .timings = &hannstar_hsd070pww1_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 151,
@@ -663,6 +754,31 @@ static const struct panel_desc hitachi_tx23d38vm0caa = {
},
};
+static const struct drm_display_mode innolux_at043tn24_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 11,
+ .vtotal = 272 + 2 + 11 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_at043tn24 = {
+ .modes = &innolux_at043tn24_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode innolux_g121i1_l01_mode = {
.clock = 71000,
.hdisplay = 1280,
@@ -733,6 +849,29 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
+static const struct drm_display_mode innolux_zj070na_01p_mode = {
+ .clock = 51501,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 128,
+ .hsync_end = 1024 + 128 + 64,
+ .htotal = 1024 + 128 + 64 + 128,
+ .vdisplay = 600,
+ .vsync_start = 600 + 16,
+ .vsync_end = 600 + 16 + 4,
+ .vtotal = 600 + 16 + 4 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_zj070na_01p = {
+ .modes = &innolux_zj070na_01p_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 1024,
+ .height = 600,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -756,6 +895,30 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
+ .clock = 25000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 10,
+ .htotal = 480 + 10 + 10 + 15,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 3,
+ .vtotal = 800 + 3 + 3 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc ortustech_com43h4m85ulc = {
+ .modes = &ortustech_com43h4m85ulc_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 56,
+ .height = 93,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -779,11 +942,63 @@ static const struct panel_desc samsung_ltn101nt05 = {
},
};
+static const struct drm_display_mode samsung_ltn140at29_301_mode = {
+ .clock = 76300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 48,
+ .htotal = 1366 + 64 + 48 + 128,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 5,
+ .vtotal = 768 + 2 + 5 + 17,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_ltn140at29_301 = {
+ .modes = &samsung_ltn140at29_301_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 320,
+ .height = 187,
+ },
+};
+
+static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
+ .clock = 33300,
+ .hdisplay = 800,
+ .hsync_start = 800 + 1,
+ .hsync_end = 800 + 1 + 64,
+ .htotal = 800 + 1 + 64 + 64,
+ .vdisplay = 480,
+ .vsync_start = 480 + 1,
+ .vsync_end = 480 + 1 + 23,
+ .vtotal = 480 + 1 + 23 + 22,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc shelly_sca07010_bfn_lnn = {
+ .modes = &shelly_sca07010_bfn_lnn_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct of_device_id platform_of_match[] = {
{
+ .compatible = "ampire,am800480r3tmqwa1h",
+ .data = &ampire_am800480r3tmqwa1h,
+ }, {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b101ean01",
+ .data = &auo_b101ean01,
+ }, {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
@@ -826,6 +1041,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "hit,tx23d38vm0caa",
.data = &hitachi_tx23d38vm0caa
}, {
+ .compatible = "innolux,at043tn24",
+ .data = &innolux_at043tn24,
+ }, {
.compatible ="innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
@@ -835,12 +1053,24 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
+ .compatible = "innolux,zj070na-01p",
+ .data = &innolux_zj070na_01p,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "ortustech,com43h4m85ulc",
+ .data = &ortustech_com43h4m85ulc,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
+ .compatible = "samsung,ltn140at29-301",
+ .data = &samsung_ltn140at29_301,
+ }, {
+ .compatible = "shelly,sca07010-bfn-lnn",
+ .data = &shelly_sca07010_bfn_lnn,
+ }, {
/* sentinel */
}
};
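The Hannstar entry above is the first user of the new timings path in panel-simple: a struct display_timing carries {min, typ, max} triplets instead of one fixed mode, and panel_simple_get_fixed_modes() turns the typical values into a drm_display_mode via a videomode. The conversion in isolation, with invented timing values (only the helper calls are from the patch):

#include <video/display_timing.h>
#include <video/videomode.h>
#include <drm/drm_modes.h>

static const struct display_timing demo_timing = {
	/* {min, typ, max} -- values invented for illustration */
	.pixelclock = { 64000000, 71000000, 82000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 1, 1, 10 },
	.hback_porch = { 1, 1, 10 },
	.hsync_len = { 52, 158, 661 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 1, 1, 10 },
	.vback_porch = { 1, 1, 10 },
	.vsync_len = { 1, 21, 203 },
};

static void demo_add_mode(struct drm_device *drm, struct drm_connector *conn)
{
	struct videomode vm;
	struct drm_display_mode *mode;

	videomode_from_timing(&demo_timing, &vm);	/* picks the typ values */
	mode = drm_mode_create(drm);
	if (!mode)
		return;

	drm_display_mode_from_videomode(&vm, mode);
	drm_mode_set_name(mode);
	drm_mode_probed_add(conn, mode);
}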
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1d9b80c91a15..e2d07085b6a5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -102,7 +102,7 @@ static int qxl_drm_freeze(struct drm_device *dev)
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc->enabled)
(*crtc_funcs->disable)(crtc);
}
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 970f8e92dbb7..421ae130809b 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,3 +1,11 @@
+config DRM_RADEON_USERPTR
+ bool "Always enable userptr support"
+ depends on DRM_RADEON
+ select MMU_NOTIFIER
+ help
+	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
+	  selected, to enable full userptr support.
+
config DRM_RADEON_UMS
bool "Enable userspace modesetting on radeon (DEPRECATED)"
depends on DRM_RADEON
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 4605633e253b..dea53e36a2ef 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -81,7 +81,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o \
- radeon_sync.o radeon_audio.o
+ radeon_sync.o radeon_audio.o radeon_dp_auxch.o radeon_dp_mst.o
radeon-$(CONFIG_MMU_NOTIFIER) += radeon_mn.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 86807ee91bd1..dac78ad24b31 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -606,6 +610,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
+ if (radeon_encoder->is_mst_encoder) {
+ struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ }
+
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (radeon_crtc->ss_enabled) {
@@ -952,7 +963,9 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_crtc->bpc = 8;
radeon_crtc->ss_enabled = false;
- if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ if (radeon_encoder->is_mst_encoder) {
+ radeon_dp_mst_prepare_pll(crtc, mode);
+ } else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
@@ -2069,6 +2082,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
radeon_crtc->connector = NULL;
return false;
}
+ if (radeon_crtc->encoder) {
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+
+ radeon_crtc->output_csc = radeon_encoder->output_csc;
+ }
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8d74de82456e..3e3290c203c6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -158,7 +158,7 @@ done:
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
static ssize_t
-radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct radeon_i2c_chan *chan =
container_of(aux, struct radeon_i2c_chan, aux);
@@ -226,11 +226,20 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
int ret;
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
- radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
+ if (ASIC_IS_DCE5(rdev)) {
+ if (radeon_auxch)
+ radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native;
+ else
+ radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
+ } else {
+ radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
+ }
ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
if (!ret)
@@ -301,8 +310,8 @@ static int dp_get_max_dp_pix_clock(int link_rate,
/***** radeon specific DP functions *****/
-static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- u8 dpcd[DP_DPCD_SIZE])
+int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+ u8 dpcd[DP_DPCD_SIZE])
{
int max_link_rate;
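radeon_dp_aux_init() above now selects the AUX backend at runtime: DCE5+ hardware can use the native channel (radeon_dp_aux_transfer_native, gated by the radeon_auxch module parameter), while everything else keeps the ATOM table path. Stripped to the function-pointer idiom (all "demo" names are hypothetical):

struct demo_aux {
	ssize_t (*transfer)(struct demo_aux *aux, void *msg);
};

static ssize_t demo_xfer_native(struct demo_aux *aux, void *msg)
{
	return 0;	/* drive the hardware AUX channel directly */
}

static ssize_t demo_xfer_fallback(struct demo_aux *aux, void *msg)
{
	return 0;	/* go through the firmware/table path */
}

static void demo_aux_init(struct demo_aux *aux, bool hw_native, bool want_native)
{
	if (hw_native && want_native)
		aux->transfer = demo_xfer_native;
	else
		aux->transfer = demo_xfer_fallback;
}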
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c39c1d0d9d4e..f57c1ab617bc 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -671,7 +671,15 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
+ struct radeon_encoder_atom_dig *dig_enc;
+ if (radeon_encoder_is_digital(encoder)) {
+ dig_enc = radeon_encoder->enc_priv;
+ if (dig_enc->active_mst_links)
+ return ATOM_ENCODER_MODE_DP_MST;
+ }
+ if (radeon_encoder->is_mst_encoder || radeon_encoder->offset)
+ return ATOM_ENCODER_MODE_DP_MST;
/* dp bridges are always DP */
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
return ATOM_ENCODER_MODE_DP;
@@ -823,7 +831,7 @@ union dig_encoder_control {
};
void
-atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -920,7 +928,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
- args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ if (enc_override != -1)
+ args.v3.acConfig.ucDigSel = enc_override;
+ else
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
case 4:
@@ -948,7 +959,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
}
- args.v4.acConfig.ucDigSel = dig->dig_encoder;
+
+ if (enc_override != -1)
+ args.v4.acConfig.ucDigSel = enc_override;
+ else
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
@@ -969,6 +984,12 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
}
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+ atombios_dig_encoder_setup2(encoder, action, panel_mode, -1);
+}
+
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
@@ -978,7 +999,7 @@ union dig_transmitter_control {
};
void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set, int fe)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1328,7 +1349,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v5.asConfig.ucHPDSel = 0;
else
args.v5.asConfig.ucHPDSel = hpd_id + 1;
- args.v5.ucDigEncoderSel = 1 << dig_encoder;
+ args.v5.ucDigEncoderSel = (fe != -1) ? (1 << fe) : (1 << dig_encoder);
args.v5.ucDPLaneSet = lane_set;
break;
default:
@@ -1344,6 +1365,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+ atombios_dig_transmitter_setup2(encoder, action, lane_num, lane_set, -1);
+}
+
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
@@ -1687,6 +1714,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+
+ /* don't power off encoders with active MST links */
+ if (dig->active_mst_links)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1955,6 +1987,53 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
+void
+atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+ uint8_t frev, crev;
+ union crtc_source_param args;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ if (frev != 1 && crev != 2)
+ DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev);
+
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST;
+
+ switch (fe) {
+ case 0:
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ case 6:
+ args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
+ break;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@@ -2003,7 +2082,14 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
}
-static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx)
+{
+ if (enc_idx < 0)
+ return;
+ rdev->mode_info.active_encoders &= ~(1 << enc_idx);
+}
+
+int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -2012,71 +2098,79 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t dig_enc_in_use = 0;
+ int enc_idx = -1;
+ if (fe_idx >= 0) {
+ enc_idx = fe_idx;
+ goto assigned;
+ }
if (ASIC_IS_DCE6(rdev)) {
/* DCE6 */
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
- return 1;
+ enc_idx = 1;
else
- return 0;
+ enc_idx = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
- return 3;
+ enc_idx = 3;
else
- return 2;
+ enc_idx = 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
- return 5;
+ enc_idx = 5;
else
- return 4;
+ enc_idx = 4;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
+ enc_idx = 6;
break;
}
+ goto assigned;
} else if (ASIC_IS_DCE4(rdev)) {
/* DCE4/5 */
if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
/* ontario follows DCE4 */
if (rdev->family == CHIP_PALM) {
if (dig->linkb)
- return 1;
+ enc_idx = 1;
else
- return 0;
+ enc_idx = 0;
} else
/* llano follows DCE3.2 */
- return radeon_crtc->crtc_id;
+ enc_idx = radeon_crtc->crtc_id;
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
- return 1;
+ enc_idx = 1;
else
- return 0;
+ enc_idx = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
- return 3;
+ enc_idx = 3;
else
- return 2;
+ enc_idx = 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
- return 5;
+ enc_idx = 5;
else
- return 4;
+ enc_idx = 4;
break;
}
}
+ goto assigned;
}
/* on DCE32 an encoder can drive any block so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
- return radeon_crtc->crtc_id;
+ enc_idx = radeon_crtc->crtc_id;
+ goto assigned;
}
/* on DCE3 - LVTMA can only be driven by DIGB */
@@ -2104,6 +2198,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
if (!(dig_enc_in_use & 1))
return 0;
return 1;
+
+assigned:
+ if (enc_idx == -1) {
+ DRM_ERROR("Got encoder index incorrect - returning 0\n");
+ return 0;
+ }
+ if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
+ DRM_ERROR("chosen encoder in use %d\n", enc_idx);
+ }
+ rdev->mode_info.active_encoders |= (1 << enc_idx);
+ return enc_idx;
}
/* This only needs to be called once at startup */
@@ -2362,7 +2467,9 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig) {
- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ if (dig->dig_encoder >= 0)
+ radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder, -1);
if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
if (rdev->family >= CHIP_R600)
dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
@@ -2464,10 +2571,18 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
disable_done:
if (radeon_encoder_is_digital(encoder)) {
- dig = radeon_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- radeon_encoder->active_device = 0;
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, false);
+ }
+ if (atombios_get_encoder_mode(encoder) != ATOM_ENCODER_MODE_DP_MST) {
+ dig = radeon_encoder->enc_priv;
+ radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
+ dig->dig_encoder = -1;
+ radeon_encoder->active_device = 0;
+ }
+ } else
+ radeon_encoder->active_device = 0;
}
/* these are handled by the primary encoders */
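For MST, the atombios helpers gain *2 variants carrying an extra encoder/front-end override, while the original entry points become thin wrappers passing -1 for "no override" so existing callers are untouched. The compatibility pattern in miniature (the "widget" names are hypothetical):

struct widget {
	int default_fe;
};

static void widget_setup2(struct widget *w, int action, int fe_override)
{
	int fe = (fe_override != -1) ? fe_override : w->default_fe;

	/* ... program the block selected by fe for the given action ... */
	(void)fe;
	(void)action;
}

static void widget_setup(struct widget *w, int action)
{
	widget_setup2(w, action, -1);	/* old behaviour preserved */
}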
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index db08f17be76b..69556f5e247e 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2751,13 +2751,54 @@ void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
else /* current_index == 2 */
pl = &ps->high;
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- if (rdev->family >= CHIP_CEDAR) {
- seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
- current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
- } else {
- seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
- current_index, pl->sclk, pl->mclk, pl->vddc);
- }
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ }
+}
+
+u32 btc_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->sclk;
+ }
+}
+
+u32 btc_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->mclk;
}
}
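Both new btc_dpm helpers repeat the same profile-index decode before returning one field. A hypothetical refactor (not in the patch) would factor the lookup once, reusing only symbols already visible in the hunks above:

static struct rv7xx_pl *btc_current_pl(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_ps *ps = rv770_get_ps(&eg_pi->current_rps);
	u32 idx = (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) &
		   CURRENT_PROFILE_INDEX_MASK) >> CURRENT_PROFILE_INDEX_SHIFT;

	if (idx > 2)
		return NULL;

	return idx == 0 ? &ps->low : idx == 1 ? &ps->medium : &ps->high;
}

u32 btc_dpm_get_current_sclk(struct radeon_device *rdev)
{
	struct rv7xx_pl *pl = btc_current_pl(rdev);

	return pl ? pl->sclk : 0;
}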
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index bcd2f1fe803f..8730562323a8 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5922,6 +5922,20 @@ void ci_dpm_print_power_state(struct radeon_device *rdev,
r600_dpm_print_ps_status(rdev, rps);
}
+u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ u32 sclk = ci_get_average_sclk_freq(rdev);
+
+ return sclk;
+}
+
+u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ u32 mclk = ci_get_average_mclk_freq(rdev);
+
+ return mclk;
+}
+
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct ci_power_info *pi = ci_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 3e670d344a20..28faea9996f9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -141,6 +141,39 @@ static void cik_fini_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
+/**
+ * cik_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cik_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS2:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case GRBM_STATUS_SE2:
+ case GRBM_STATUS_SE3:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case (SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET):
+ case (SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET):
+ case UVD_STATUS:
+ /* TODO VCE */
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
@@ -7394,12 +7427,12 @@ int cik_irq_set(struct radeon_device *rdev)
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -7486,27 +7519,27 @@ int cik_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("cik_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("cik_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("cik_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("cik_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("cik_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("cik_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -7678,6 +7711,36 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+ if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
}
/**
@@ -7803,6 +7866,7 @@ int cik_irq_process(struct radeon_device *rdev)
u8 me_id, pipe_id, queue_id;
u32 ring_index;
bool queue_hotplug = false;
+ bool queue_dp = false;
bool queue_reset = false;
u32 addr, status, mc_client;
bool queue_thermal = false;
@@ -8048,6 +8112,48 @@ restart_ih:
DRM_DEBUG("IH: HPD6\n");
}
break;
+ case 6:
+ if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+ }
+ break;
+ case 7:
+ if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+ }
+ break;
+ case 8:
+ if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+ }
+ break;
+ case 9:
+ if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+ }
+ break;
+ case 10:
+ if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+ }
+ break;
+ case 11:
+ if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -8256,6 +8362,8 @@ restart_ih:
rptr &= rdev->ih.ptr_mask;
WREG32(IH_RB_RPTR, rptr);
}
+ if (queue_dp)
+ schedule_work(&rdev->dp_work);
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_reset) {
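The interrupt-handler changes in cik.c keep a strict top-half/bottom-half split: the IH loop only acks the HPD_RX source and sets a flag, and the work that may sleep (DP AUX and MST transactions) runs later from rdev->dp_work. The idiom in isolation (the "demo" names are hypothetical):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct dp_work;
};

static irqreturn_t demo_irq_handler(int irq, void *arg)
{
	struct demo_dev *d = arg;
	bool queue_dp = false;

	/* ... decode and ack interrupt sources, setting queue_dp ... */

	if (queue_dp)
		schedule_work(&d->dp_work);	/* AUX traffic may sleep */

	return IRQ_HANDLED;
}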
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 243a36c93b8f..0089d837a8e3 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2088,6 +2088,8 @@
# define CLK_OD(x) ((x) << 6)
# define CLK_OD_MASK (0x1f << 6)
+#define UVD_STATUS 0xf6bc
+
/* UVD clocks */
#define CG_DCLK_CNTL 0xC050009C
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 973df064c14f..f848acfd3fc8 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1006,6 +1006,34 @@ static void evergreen_init_golden_registers(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case DMA_STATUS_REG:
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split)
@@ -4392,12 +4420,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
if (rdev->family == CHIP_ARUBA)
thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
@@ -4486,27 +4514,27 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("evergreen_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("evergreen_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("evergreen_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("evergreen_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("evergreen_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.afmt[0]) {
DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
@@ -4700,6 +4728,38 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
@@ -4780,6 +4840,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 ring_index;
bool queue_hotplug = false;
bool queue_hdmi = false;
+ bool queue_dp = false;
bool queue_thermal = false;
u32 status, addr;
@@ -5019,6 +5080,48 @@ restart_ih:
DRM_DEBUG("IH: HPD6\n");
}
break;
+ case 6:
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+ }
+ break;
+ case 7:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+ }
+ break;
+ case 8:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+ }
+ break;
+ case 9:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+ }
+ break;
+ case 10:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+ }
+ break;
+ case 11:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
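/* [Editor's note: on this interrupt source, src_data 0-5 are the existing
 * HPD1-6 connect/disconnect events; the new cases 6-11 are the matching
 * HPD1-6 RX (short-pulse) events, routed to dp_work rather than
 * hotplug_work.] */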
@@ -5151,6 +5254,8 @@ restart_ih:
rptr &= rdev->ih.ptr_mask;
WREG32(IH_RB_RPTR, rptr);
}
+ if (queue_dp)
+ schedule_work(&rdev->dp_work);
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a8d1d5240fcb..4aa5f755572b 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1520,6 +1520,7 @@
#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_STATUS 0xf6bc
/*
* PM4
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 0e236d067d66..2d71da448487 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2820,6 +2820,29 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
}
}
+u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 current_index =
+ (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
+ CURR_SCLK_INDEX_SHIFT;
+ u32 sclk;
+
+ if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+ return 0;
+ } else {
+ sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+ return sclk;
+ }
+}
+
+u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
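+/* [Editor's note: the SMU's graphics_level table is stored big-endian, hence
+ * the be32_to_cpu() above; and since KV/KB are UMA APUs with no discrete
+ * memory clock domain, the mclk query simply reports the bootup UMA clock.] */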
void kv_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *rps)
{
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index dab00812abaa..e8a496ff007e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -828,6 +828,35 @@ out:
return err;
}
+/**
+ * cayman_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+ case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
int tn_get_temp(struct radeon_device *rdev)
{
u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 7bc9f8d9804a..c3d531a1114b 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -4319,6 +4319,42 @@ void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
}
}
+u32 ni_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 ni_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->mclk;
+ }
+}
+
u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index 5db7b7d6feb0..da310a70c0f0 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -83,4 +83,48 @@
# define NI_REGAMMA_PROG_B 4
# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+#define NI_DP_MSE_LINK_TIMING 0x73a0
+# define NI_DP_MSE_LINK_FRAME(x) (((x) & 0x3ff) << 0)
+# define NI_DP_MSE_LINK_LINE(x) (((x) & 0x3) << 16)
+
+#define NI_DP_MSE_MISC_CNTL 0x736c
+# define NI_DP_MSE_BLANK_CODE(x) (((x) & 0x1) << 0)
+# define NI_DP_MSE_TIMESTAMP_MODE(x) (((x) & 0x1) << 4)
+# define NI_DP_MSE_ZERO_ENCODER(x) (((x) & 0x1) << 8)
+
+#define NI_DP_MSE_RATE_CNTL 0x7384
+# define NI_DP_MSE_RATE_Y(x) (((x) & 0x3ffffff) << 0)
+# define NI_DP_MSE_RATE_X(x) (((x) & 0x3f) << 26)
+
+#define NI_DP_MSE_RATE_UPDATE 0x738c
+
+#define NI_DP_MSE_SAT0 0x7390
+# define NI_DP_MSE_SAT_SRC0(x) (((x) & 0x7) << 0)
+# define NI_DP_MSE_SAT_SLOT_COUNT0(x) (((x) & 0x3f) << 8)
+# define NI_DP_MSE_SAT_SRC1(x) (((x) & 0x7) << 16)
+# define NI_DP_MSE_SAT_SLOT_COUNT1(x) (((x) & 0x3f) << 24)
+
+#define NI_DP_MSE_SAT1 0x7394
+
+#define NI_DP_MSE_SAT2 0x7398
+
+#define NI_DP_MSE_SAT_UPDATE 0x739c
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
+# define NI_DIG_FE_DIG_MODE(x) (((x) & 0x7) << 16)
+# define NI_DIG_MODE_DP_SST 0
+# define NI_DIG_MODE_LVDS 1
+# define NI_DIG_MODE_TMDS_DVI 2
+# define NI_DIG_MODE_TMDS_HDMI 3
+# define NI_DIG_MODE_DP_MST 5
+# define NI_DIG_HPD_SELECT(x) (((x) & 0x7) << 28)
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_SOURCE_SELECT(x) (((x) & 0x3) << 0)
+# define NI_DIG_STEREOSYNC_SELECT(x) (((x) & 0x3) << 4)
+# define NI_DIG_STEREOSYNC_GATE_EN(x) (((x) & 0x1) << 8)
+# define NI_DIG_DUAL_LINK_ENABLE(x) (((x) & 0x1) << 16)
+# define NI_DIG_SWAP(x) (((x) & 0x1) << 18)
+# define NI_DIG_SYMCLK_FE_ON (0x1 << 24)
#endif
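/* [Editor's note: MSE here is the DP multi-stream encoder. The SAT (stream
 * allocation table) registers pack two streams per 32-bit register - a
 * 16-bit {source FE, slot count} entry each - which is why the MST code in
 * this series addresses them as satreg = stream >> 1, satidx = stream & 1.] */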
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 6b44580440d0..3b290838918c 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -816,6 +816,52 @@
#define MC_PMG_CMD_MRS2 0x2b5c
#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
+#define AUX_CONTROL 0x6200
+#define AUX_EN (1 << 0)
+#define AUX_LS_READ_EN (1 << 8)
+#define AUX_LS_UPDATE_DISABLE(x) (((x) & 0x1) << 12)
+#define AUX_HPD_DISCON(x) (((x) & 0x1) << 16)
+#define AUX_DET_EN (1 << 18)
+#define AUX_HPD_SEL(x) (((x) & 0x7) << 20)
+#define AUX_IMPCAL_REQ_EN (1 << 24)
+#define AUX_TEST_MODE (1 << 28)
+#define AUX_DEGLITCH_EN (1 << 29)
+#define AUX_SW_CONTROL 0x6204
+#define AUX_SW_GO (1 << 0)
+#define AUX_LS_READ_TRIG (1 << 2)
+#define AUX_SW_START_DELAY(x) (((x) & 0xf) << 4)
+#define AUX_SW_WR_BYTES(x) (((x) & 0x1f) << 16)
+
+#define AUX_SW_INTERRUPT_CONTROL 0x620c
+#define AUX_SW_DONE_INT (1 << 0)
+#define AUX_SW_DONE_ACK (1 << 1)
+#define AUX_SW_DONE_MASK (1 << 2)
+#define AUX_SW_LS_DONE_INT (1 << 4)
+#define AUX_SW_LS_DONE_MASK (1 << 6)
+#define AUX_SW_STATUS 0x6210
+#define AUX_SW_DONE (1 << 0)
+#define AUX_SW_REQ (1 << 1)
+#define AUX_SW_RX_TIMEOUT_STATE(x) (((x) & 0x7) << 4)
+#define AUX_SW_RX_TIMEOUT (1 << 7)
+#define AUX_SW_RX_OVERFLOW (1 << 8)
+#define AUX_SW_RX_HPD_DISCON (1 << 9)
+#define AUX_SW_RX_PARTIAL_BYTE (1 << 10)
+#define AUX_SW_NON_AUX_MODE (1 << 11)
+#define AUX_SW_RX_MIN_COUNT_VIOL (1 << 12)
+#define AUX_SW_RX_INVALID_STOP (1 << 14)
+#define AUX_SW_RX_SYNC_INVALID_L (1 << 17)
+#define AUX_SW_RX_SYNC_INVALID_H (1 << 18)
+#define AUX_SW_RX_INVALID_START (1 << 19)
+#define AUX_SW_RX_RECV_NO_DET (1 << 20)
+#define AUX_SW_RX_RECV_INVALID_H (1 << 22)
+#define AUX_SW_RX_RECV_INVALID_V (1 << 23)
+
+#define AUX_SW_DATA 0x6218
+#define AUX_SW_DATA_RW (1 << 0)
+#define AUX_SW_DATA_MASK(x) (((x) & 0xff) << 8)
+#define AUX_SW_DATA_INDEX(x) (((x) & 0x1f) << 16)
+#define AUX_SW_AUTOINCREMENT_DISABLE (1 << 31)
+
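+/* [Editor's note: these AUX_SW_* registers back the new native auxch path in
+ * radeon_dp_auxch.c below: the message is staged byte-by-byte through
+ * AUX_SW_DATA, kicked off with AUX_SW_GO, and the reply drained from the
+ * same data register with auto-increment.] */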
#define LB_SYNC_RESET_SEL 0x6b28
#define LB_SYNC_RESET_SEL_MASK (3 << 0)
#define LB_SYNC_RESET_SEL_SHIFT 0
@@ -1086,6 +1132,7 @@
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
#define UVD_RBC_RB_RPTR 0xF690
#define UVD_RBC_RB_WPTR 0xF694
+#define UVD_STATUS 0xf6bc
/*
* PM4
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2fcad344492f..8f6d862a1882 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -109,6 +109,32 @@ extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
/**
+ * r600_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int r600_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS2:
+ case R_000E50_SRBM_STATUS:
+ case DMA_STATUS_REG:
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
* r600_get_xclk - get the xclk
*
* @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 33d5a4f4eebd..d2abe481954f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -111,6 +111,8 @@ extern int radeon_deep_color;
extern int radeon_use_pflipirq;
extern int radeon_bapm;
extern int radeon_backlight;
+extern int radeon_auxch;
+extern int radeon_mst;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -505,7 +507,7 @@ struct radeon_bo {
pid_t pid;
struct radeon_mn *mn;
- struct interval_tree_node mn_it;
+ struct list_head mn_list;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -1857,6 +1859,8 @@ struct radeon_asic {
u32 (*get_xclk)(struct radeon_device *rdev);
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
+ /* get register for info ioctl */
+ int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
@@ -1985,6 +1989,8 @@ struct radeon_asic {
u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
+ u32 (*get_current_sclk)(struct radeon_device *rdev);
+ u32 (*get_current_mclk)(struct radeon_device *rdev);
} dpm;
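/* [Editor's note: these two hooks back the new userspace queries for the
 * current engine/memory clock added in this series (the
 * RADEON_INFO_CURRENT_GPU_SCLK/MCLK info requests), reporting the live DPM
 * state rather than the requested one.] */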
/* pageflipping */
struct {
@@ -2408,6 +2414,7 @@ struct radeon_device {
struct radeon_rlc rlc;
struct radeon_mec mec;
struct work_struct hotplug_work;
+ struct work_struct dp_work;
struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -2932,6 +2939,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
+#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
@@ -2950,6 +2958,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
+#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev))
+#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev))
/* Common functions */
/* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index c0ecd128b14b..fafd8ce4d58f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -136,6 +136,11 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
}
}
+static int radeon_invalid_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ return -EINVAL;
+}
/* helper to disable agp */
/**
@@ -199,6 +204,7 @@ static struct radeon_asic r100_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
@@ -266,6 +272,7 @@ static struct radeon_asic r200_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
@@ -361,6 +368,7 @@ static struct radeon_asic r300_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
@@ -428,6 +436,7 @@ static struct radeon_asic r300_asic_pcie = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -495,6 +504,7 @@ static struct radeon_asic r420_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -562,6 +572,7 @@ static struct radeon_asic rs400_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
@@ -629,6 +640,7 @@ static struct radeon_asic rs600_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -696,6 +708,7 @@ static struct radeon_asic rs690_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
@@ -763,6 +776,7 @@ static struct radeon_asic rv515_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -830,6 +844,7 @@ static struct radeon_asic r520_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -925,6 +940,7 @@ static struct radeon_asic r600_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1009,6 +1025,7 @@ static struct radeon_asic rv6xx_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1080,6 +1097,8 @@ static struct radeon_asic rv6xx_asic = {
.print_power_state = &rv6xx_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv6xx_dpm_force_performance_level,
+ .get_current_sclk = &rv6xx_dpm_get_current_sclk,
+ .get_current_mclk = &rv6xx_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &rs600_page_flip,
@@ -1099,6 +1118,7 @@ static struct radeon_asic rs780_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1170,6 +1190,8 @@ static struct radeon_asic rs780_asic = {
.print_power_state = &rs780_dpm_print_power_state,
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rs780_dpm_force_performance_level,
+ .get_current_sclk = &rs780_dpm_get_current_sclk,
+ .get_current_mclk = &rs780_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &rs600_page_flip,
@@ -1202,6 +1224,7 @@ static struct radeon_asic rv770_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1274,6 +1297,8 @@ static struct radeon_asic rv770_asic = {
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &rv770_dpm_vblank_too_short,
+ .get_current_sclk = &rv770_dpm_get_current_sclk,
+ .get_current_mclk = &rv770_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &rv770_page_flip,
@@ -1319,6 +1344,7 @@ static struct radeon_asic evergreen_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1391,6 +1417,8 @@ static struct radeon_asic evergreen_asic = {
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &cypress_dpm_vblank_too_short,
+ .get_current_sclk = &rv770_dpm_get_current_sclk,
+ .get_current_mclk = &rv770_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1410,6 +1438,7 @@ static struct radeon_asic sumo_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1481,6 +1510,8 @@ static struct radeon_asic sumo_asic = {
.print_power_state = &sumo_dpm_print_power_state,
.debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
.force_performance_level = &sumo_dpm_force_performance_level,
+ .get_current_sclk = &sumo_dpm_get_current_sclk,
+ .get_current_mclk = &sumo_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1500,6 +1531,7 @@ static struct radeon_asic btc_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1572,6 +1604,8 @@ static struct radeon_asic btc_asic = {
.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
+ .get_current_sclk = &btc_dpm_get_current_sclk,
+ .get_current_mclk = &btc_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1634,6 +1668,7 @@ static struct radeon_asic cayman_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = cayman_get_allowed_info_register,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1717,6 +1752,8 @@ static struct radeon_asic cayman_asic = {
.debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ni_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
+ .get_current_sclk = &ni_dpm_get_current_sclk,
+ .get_current_mclk = &ni_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1736,6 +1773,7 @@ static struct radeon_asic trinity_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = cayman_get_allowed_info_register,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1819,6 +1857,8 @@ static struct radeon_asic trinity_asic = {
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level,
.enable_bapm = &trinity_dpm_enable_bapm,
+ .get_current_sclk = &trinity_dpm_get_current_sclk,
+ .get_current_mclk = &trinity_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1868,6 +1908,7 @@ static struct radeon_asic si_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &si_get_xclk,
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
+ .get_allowed_info_register = si_get_allowed_info_register,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1955,6 +1996,8 @@ static struct radeon_asic si_asic = {
.fan_ctrl_get_mode = &si_fan_ctrl_get_mode,
.get_fan_speed_percent = &si_fan_ctrl_get_fan_speed_percent,
.set_fan_speed_percent = &si_fan_ctrl_set_fan_speed_percent,
+ .get_current_sclk = &si_dpm_get_current_sclk,
+ .get_current_mclk = &si_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -2032,6 +2075,7 @@ static struct radeon_asic ci_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+ .get_allowed_info_register = cik_get_allowed_info_register,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -2123,6 +2167,8 @@ static struct radeon_asic ci_asic = {
.fan_ctrl_get_mode = &ci_fan_ctrl_get_mode,
.get_fan_speed_percent = &ci_fan_ctrl_get_fan_speed_percent,
.set_fan_speed_percent = &ci_fan_ctrl_set_fan_speed_percent,
+ .get_current_sclk = &ci_dpm_get_current_sclk,
+ .get_current_mclk = &ci_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -2142,6 +2188,7 @@ static struct radeon_asic kv_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+ .get_allowed_info_register = cik_get_allowed_info_register,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -2229,6 +2276,8 @@ static struct radeon_asic kv_asic = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
+ .get_current_sclk = &kv_dpm_get_current_sclk,
+ .get_current_mclk = &kv_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 72bdd3bf0d8e..cf0a90bb61ca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -384,6 +384,8 @@ u32 r600_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r600_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int r600_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_init(struct radeon_device *rdev);
@@ -433,6 +435,8 @@ void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
struct seq_file *m);
int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev);
/* rs780 dpm */
int rs780_dpm_init(struct radeon_device *rdev);
int rs780_dpm_enable(struct radeon_device *rdev);
@@ -449,6 +453,8 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
struct seq_file *m);
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -488,6 +494,8 @@ void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
int rv770_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev);
+u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev);
/*
* evergreen
@@ -540,6 +548,8 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv);
int evergreen_get_temp(struct radeon_device *rdev);
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int sumo_get_temp(struct radeon_device *rdev);
int tn_get_temp(struct radeon_device *rdev);
int cypress_dpm_init(struct radeon_device *rdev);
@@ -563,6 +573,8 @@ u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+u32 btc_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 btc_dpm_get_current_mclk(struct radeon_device *rdev);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
int sumo_dpm_late_enable(struct radeon_device *rdev);
@@ -581,6 +593,8 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m);
int sumo_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev);
/*
* cayman
@@ -637,6 +651,8 @@ uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cayman_dma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int ni_dpm_init(struct radeon_device *rdev);
void ni_dpm_setup_asic(struct radeon_device *rdev);
@@ -655,6 +671,8 @@ void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
+u32 ni_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 ni_dpm_get_current_mclk(struct radeon_device *rdev);
int trinity_dpm_init(struct radeon_device *rdev);
int trinity_dpm_enable(struct radeon_device *rdev);
int trinity_dpm_late_enable(struct radeon_device *rdev);
@@ -674,6 +692,8 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
+u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -726,6 +746,8 @@ u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int si_get_temp(struct radeon_device *rdev);
+int si_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int si_dpm_init(struct radeon_device *rdev);
void si_dpm_setup_asic(struct radeon_device *rdev);
int si_dpm_enable(struct radeon_device *rdev);
@@ -746,6 +768,8 @@ int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
u32 speed);
u32 si_fan_ctrl_get_mode(struct radeon_device *rdev);
void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
+u32 si_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 si_dpm_get_current_mclk(struct radeon_device *rdev);
/* DCE8 - CIK */
void dce8_bandwidth_update(struct radeon_device *rdev);
@@ -841,6 +865,8 @@ void cik_sdma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
int ci_get_temp(struct radeon_device *rdev);
int kv_get_temp(struct radeon_device *rdev);
+int cik_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int ci_dpm_init(struct radeon_device *rdev);
int ci_dpm_enable(struct radeon_device *rdev);
@@ -862,6 +888,8 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+u32 ci_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 ci_dpm_get_current_mclk(struct radeon_device *rdev);
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
u32 *speed);
@@ -890,6 +918,8 @@ int kv_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
+u32 kv_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 kv_dpm_get_current_mclk(struct radeon_device *rdev);
/* uvd v1.0 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fc1b3f34cf18..8f285244c839 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -845,6 +845,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
radeon_link_encoder_connector(dev);
+ radeon_setup_mst_connector(dev);
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index b21ef69a34ac..48d49e651a30 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -520,16 +520,40 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
int err;
+ list_for_each_entry(connector,
+ &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return -ENOENT;
+ }
+
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %d\n", err);
return err;
}
+ if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+ if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ }
+
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
DRM_ERROR("failed to pack AVI infoframe: %d\n", err);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 27def67cb6be..cebb65e07e1d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -27,6 +27,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_audio.h"
@@ -34,12 +35,33 @@
#include <linux/pm_runtime.h>
+static int radeon_dp_handle_hpd(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_dp_mst_check_status(radeon_connector);
+ if (ret == -EINVAL)
+ return 1;
+ return 0;
+}
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ if (radeon_connector->is_mst_connector)
+ return;
+ if (dig_connector->is_mst) {
+ radeon_dp_handle_hpd(connector);
+ return;
+ }
+ }
/* bail if the connector does not have hpd pin, e.g.,
* VGA, TV, etc.
*/
@@ -135,7 +157,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- struct drm_connector_helper_funcs *connector_funcs =
+ const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -225,7 +247,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
- struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
@@ -702,7 +724,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
if (connector->encoder)
radeon_encoder = to_radeon_encoder(connector->encoder);
else {
- struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
}
@@ -725,6 +747,30 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_property_change_mode(&radeon_encoder->base);
}
+ if (property == rdev->mode_info.output_csc_property) {
+ if (connector->encoder)
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ else {
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+ }
+
+ if (radeon_encoder->output_csc == val)
+ return 0;
+
+ radeon_encoder->output_csc = val;
+
+ if (connector->encoder->crtc) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ radeon_crtc->output_csc = radeon_encoder->output_csc;
+
+ (*crtc_funcs->load_lut)(crtc);
+ }
+ }
+
return 0;
}
@@ -896,7 +942,7 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
if (connector->encoder)
radeon_encoder = to_radeon_encoder(connector->encoder);
else {
- struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
}
@@ -964,7 +1010,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
int r;
@@ -1094,7 +1140,7 @@ static enum drm_connector_status
radeon_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
int r;
@@ -1174,7 +1220,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
int i, r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1585,6 +1631,9 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int r;
+ if (radeon_dig_connector->is_mst)
+ return connector_status_disconnected;
+
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0)
return connector_status_disconnected;
@@ -1635,7 +1684,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
@@ -1643,12 +1692,21 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
radeon_dp_getdpcd(radeon_connector);
+ r = radeon_dp_mst_probe(radeon_connector);
+ if (r == 1)
+ ret = connector_status_disconnected;
+ }
} else {
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- if (radeon_dp_getdpcd(radeon_connector))
- ret = connector_status_connected;
+ if (radeon_dp_getdpcd(radeon_connector)) {
+ r = radeon_dp_mst_probe(radeon_connector);
+ if (r == 1)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ }
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
@@ -1872,6 +1930,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1904,6 +1966,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1950,6 +2016,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -1972,6 +2042,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
@@ -2023,6 +2097,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.load_detect_property,
1);
}
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_DVII)
connector->doublescan_allowed = true;
@@ -2068,6 +2146,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -2116,6 +2198,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -2352,3 +2438,27 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
+
+void radeon_setup_mst_connector(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ if (!ASIC_IS_DCE5(rdev))
+ return;
+
+ if (radeon_mst == 0)
+ return;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int ret;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ ret = radeon_dp_mst_init(radeon_connector);
+ }
+}
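/* [Editor's note: MST setup is gated twice - on DCE5+ hardware and on the new
 * radeon_mst module parameter, which defaults to off in this merge. A hedged
 * usage sketch: booting with radeon.mst=1 (or modprobe radeon mst=1) enables
 * the MST probing path.] */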
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bd7519fdd3f4..b7ca4c514621 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1442,6 +1442,11 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
}
+ r = radeon_mst_debugfs_init(rdev);
+ if (r) {
+ DRM_ERROR("registering mst debugfs failed (%d).\n", r);
+ }
+
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 913fafa597ad..d2e9e9efc159 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -154,7 +154,7 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
(NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
- (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+ (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) |
NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
@@ -1382,6 +1382,13 @@ static struct drm_prop_enum_list radeon_dither_enum_list[] =
{ RADEON_FMT_DITHER_ENABLE, "on" },
};
+static struct drm_prop_enum_list radeon_output_csc_enum_list[] =
+{ { RADEON_OUTPUT_CSC_BYPASS, "bypass" },
+ { RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
+ { RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
+ { RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1444,6 +1451,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
"dither",
radeon_dither_enum_list, sz);
+ sz = ARRAY_SIZE(radeon_output_csc_enum_list);
+ rdev->mode_info.output_csc_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "output_csc",
+ radeon_output_csc_enum_list, sz);
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
new file mode 100644
index 000000000000..bf1fecc6cceb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "nid.h"
+
+#define AUX_RX_ERROR_FLAGS (AUX_SW_RX_OVERFLOW | \
+ AUX_SW_RX_HPD_DISCON | \
+ AUX_SW_RX_PARTIAL_BYTE | \
+ AUX_SW_NON_AUX_MODE | \
+ AUX_SW_RX_MIN_COUNT_VIOL | \
+ AUX_SW_RX_INVALID_STOP | \
+ AUX_SW_RX_SYNC_INVALID_L | \
+ AUX_SW_RX_SYNC_INVALID_H | \
+ AUX_SW_RX_INVALID_START | \
+ AUX_SW_RX_RECV_NO_DET | \
+ AUX_SW_RX_RECV_INVALID_H | \
+ AUX_SW_RX_RECV_INVALID_V)
+
+#define AUX_SW_REPLY_GET_BYTE_COUNT(x) (((x) >> 24) & 0x1f)
+
+#define BARE_ADDRESS_SIZE 3
+
+static const u32 aux_offset[] =
+{
+ 0x6200 - 0x6200,
+ 0x6250 - 0x6200,
+ 0x62a0 - 0x6200,
+ 0x6300 - 0x6200,
+ 0x6350 - 0x6200,
+ 0x63a0 - 0x6200,
+};
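+/* [Editor's note: the AUX instances are not uniformly strided - 0x50 apart
+ * except for the 0x62a0 -> 0x6300 step - which is why a lookup table is used
+ * here instead of computing 0x6200 + instance * 0x50.] */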
+
+ssize_t
+radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct radeon_i2c_chan *chan =
+ container_of(aux, struct radeon_i2c_chan, aux);
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ret = 0, i;
+ uint32_t tmp, ack = 0;
+ int instance = chan->rec.i2c_id & 0xf;
+ u8 byte;
+ u8 *buf = msg->buffer;
+ int retry_count = 0;
+ int bytes;
+ int msize;
+ bool is_write = false;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ is_write = true;
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* work out two sizes required */
+ msize = 0;
+ bytes = BARE_ADDRESS_SIZE;
+ if (msg->size) {
+ msize = msg->size - 1;
+ bytes++;
+ if (is_write)
+ bytes += msg->size;
+ }
+
+ mutex_lock(&chan->mutex);
+
+ /* switch the pad to aux mode */
+ tmp = RREG32(chan->rec.mask_clk_reg);
+ tmp |= (1 << 16);
+ WREG32(chan->rec.mask_clk_reg, tmp);
+
+ /* setup AUX control register with correct HPD pin */
+ tmp = RREG32(AUX_CONTROL + aux_offset[instance]);
+
+ tmp &= AUX_HPD_SEL(0x7);
+ tmp |= AUX_HPD_SEL(chan->rec.hpd);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
+
+ WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+
+ /* atombios appears to write this twice; let's copy it */
+ WREG32(AUX_SW_CONTROL + aux_offset[instance],
+ AUX_SW_WR_BYTES(bytes));
+ WREG32(AUX_SW_CONTROL + aux_offset[instance],
+ AUX_SW_WR_BYTES(bytes));
+
+ /* write the data header into the registers */
+ /* request, address, msg size */
+ byte = (msg->request << 4);
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
+
+ byte = (msg->address >> 8) & 0xff;
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte));
+
+ byte = msg->address & 0xff;
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte));
+
+ byte = msize;
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte));
+
+ /* if we are writing - write the msg buffer */
+ if (is_write) {
+ for (i = 0; i < msg->size; i++) {
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(buf[i]));
+ }
+ }
+
+ /* clear the ACK */
+ WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK);
+
+ /* write the size and GO bits */
+ WREG32(AUX_SW_CONTROL + aux_offset[instance],
+ AUX_SW_WR_BYTES(bytes) | AUX_SW_GO);
+
+ /* poll the status registers - TODO irq support */
+ do {
+ tmp = RREG32(AUX_SW_STATUS + aux_offset[instance]);
+ if (tmp & AUX_SW_DONE) {
+ break;
+ }
+ usleep_range(100, 200);
+ } while (retry_count++ < 1000);
+
+ if (retry_count >= 1000) {
+ DRM_ERROR("auxch hw never signalled completion, error %08x\n", tmp);
+ ret = -EIO;
+ goto done;
+ }
+
+ if (tmp & AUX_SW_RX_TIMEOUT) {
+ DRM_DEBUG_KMS("dp_aux_ch timed out\n");
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+ if (tmp & AUX_RX_ERROR_FLAGS) {
+ DRM_DEBUG_KMS("dp_aux_ch flags not zero: %08x\n", tmp);
+ ret = -EIO;
+ goto done;
+ }
+
+ bytes = AUX_SW_REPLY_GET_BYTE_COUNT(tmp);
+ if (bytes) {
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_RW | AUX_SW_AUTOINCREMENT_DISABLE);
+
+ tmp = RREG32(AUX_SW_DATA + aux_offset[instance]);
+ ack = (tmp >> 8) & 0xff;
+
+ for (i = 0; i < bytes - 1; i++) {
+ tmp = RREG32(AUX_SW_DATA + aux_offset[instance]);
+ if (buf)
+ buf[i] = (tmp >> 8) & 0xff;
+ }
+ if (buf)
+ ret = bytes - 1;
+ }
+
+ WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK);
+
+ if (is_write)
+ ret = msg->size;
+done:
+ mutex_unlock(&chan->mutex);
+
+ if (ret >= 0)
+ msg->reply = ack >> 4;
+ return ret;
+}
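/* [Editor's note: a hedged sketch of how this series wires the native path
 * in - the exact hookup lives in the DP aux init code, roughly:
 *
 *	if (ASIC_IS_DCE5(rdev) && radeon_auxch)
 *		radeon_connector->ddc_bus->aux.transfer =
 *			radeon_dp_aux_transfer_native;
 *	else
 *		radeon_connector->ddc_bus->aux.transfer =
 *			radeon_dp_aux_transfer_atom;
 *
 * so the atombios-based transfer remains the fallback.] */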
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
new file mode 100644
index 000000000000..1017338a49d9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -0,0 +1,782 @@
+
+#include <drm/drmP.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "radeon.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+static struct radeon_encoder *radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector);
+
+static int radeon_atom_set_enc_offset(int id)
+{
+ static const int offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ 0x13830 - 0x7030 };
+
+ return offsets[id];
+}
+
+static int radeon_dp_mst_set_be_cntl(struct radeon_encoder *primary,
+ struct radeon_encoder_mst *mst_enc,
+ enum radeon_hpd_id hpd, bool enable)
+{
+ struct drm_device *dev = primary->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t reg;
+ int retries = 0;
+ uint32_t temp;
+
+ reg = RREG32(NI_DIG_BE_CNTL + primary->offset);
+
+ /* set MST mode */
+ reg &= ~NI_DIG_FE_DIG_MODE(7);
+ reg |= NI_DIG_FE_DIG_MODE(NI_DIG_MODE_DP_MST);
+
+ if (enable)
+ reg |= NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
+ else
+ reg &= ~NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
+
+ reg |= NI_DIG_HPD_SELECT(hpd);
+ DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DIG_BE_CNTL + primary->offset, reg);
+ WREG32(NI_DIG_BE_CNTL + primary->offset, reg);
+
+ if (enable) {
+ uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
+
+ do {
+ temp = RREG32(NI_DIG_FE_CNTL + offset);
+ } while ((temp & NI_DIG_SYMCLK_FE_ON) && retries++ < 10000);
+ if (retries == 10000)
+ DRM_ERROR("timed out waiting for FE %d %d\n", primary->offset, mst_enc->fe);
+ }
+ return 0;
+}
+
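+/* [Editor's note: NI_DIG_FE_SOURCE_SELECT is a bitmask of digital front ends
+ * feeding this back end, so MST enable/disable just sets or clears the bit
+ * for this stream's FE while forcing the BE into DP-MST mode (mode 5).] */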
+static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
+ int stream_number,
+ int fe,
+ int slots)
+{
+ struct drm_device *dev = primary->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u32 temp, val;
+ int retries = 0;
+ int satreg, satidx;
+
+ satreg = stream_number >> 1;
+ satidx = stream_number & 1;
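+ /* each SAT register packs two stream attributes, 16 bits apiece */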
+
+ temp = RREG32(NI_DP_MSE_SAT0 + satreg + primary->offset);
+
+ val = NI_DP_MSE_SAT_SLOT_COUNT0(slots) | NI_DP_MSE_SAT_SRC0(fe);
+
+ val <<= (16 * satidx);
+
+ temp &= ~(0xffff << (16 * satidx));
+
+ temp |= val;
+
+ DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
+ WREG32(NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
+
+ WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
+
+ do {
+ temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
+ } while ((temp & 0x1) && retries++ < 10000);
+
+ if (retries > 10000)
+ DRM_ERROR("timed out waiting for SAT update %d\n", primary->offset);
+
+ /* MTP 16 ? */
+ return 0;
+}
+
+static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn,
+ struct radeon_encoder *primary)
+{
+ struct drm_device *dev = mst_conn->base.dev;
+ struct stream_attribs new_attribs[6];
+ int i;
+ int idx = 0;
+ struct radeon_connector *radeon_connector;
+ struct drm_connector *connector;
+
+ memset(new_attribs, 0, sizeof(new_attribs));
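+ /* rebuild the attribute table from every active MST stream on this port */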
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_encoder *subenc;
+ struct radeon_encoder_mst *mst_enc;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->is_mst_connector)
+ continue;
+
+ if (radeon_connector->mst_port != mst_conn)
+ continue;
+
+ subenc = radeon_connector->mst_encoder;
+ mst_enc = subenc->enc_priv;
+
+ if (!mst_enc->enc_active)
+ continue;
+
+ new_attribs[idx].fe = mst_enc->fe;
+ new_attribs[idx].slots = drm_dp_mst_get_vcpi_slots(&mst_conn->mst_mgr, mst_enc->port);
+ idx++;
+ }
+
+ for (i = 0; i < idx; i++) {
+ if (new_attribs[i].fe != mst_conn->cur_stream_attribs[i].fe ||
+ new_attribs[i].slots != mst_conn->cur_stream_attribs[i].slots) {
+ radeon_dp_mst_set_stream_attrib(primary, i, new_attribs[i].fe, new_attribs[i].slots);
+ mst_conn->cur_stream_attribs[i].fe = new_attribs[i].fe;
+ mst_conn->cur_stream_attribs[i].slots = new_attribs[i].slots;
+ }
+ }
+
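+ /* disable any attribs left enabled by the previous configuration */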
+ for (i = idx; i < mst_conn->enabled_attribs; i++) {
+ radeon_dp_mst_set_stream_attrib(primary, i, 0, 0);
+ mst_conn->cur_stream_attribs[i].fe = 0;
+ mst_conn->cur_stream_attribs[i].slots = 0;
+ }
+ mst_conn->enabled_attribs = idx;
+ return 0;
+}
+
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+{
+ struct drm_device *dev = mst->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_mst *mst_enc = mst->enc_priv;
+ uint32_t val, temp;
+ uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
+ int retries = 0;
+
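+ /* write the X/Y rate values and poll until the hardware acknowledges the update */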
+ val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
+
+ WREG32(NI_DP_MSE_RATE_CNTL + offset, val);
+
+ do {
+ temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+ } while ((temp & 0x1) && (retries++ < 10000));
+
+ if (retries >= 10000)
+ DRM_ERROR("timed out wait for rate cntl %d\n", mst_enc->fe);
+ return 0;
+}
+
+static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, radeon_connector->port);
+ radeon_connector->edid = edid;
+ DRM_DEBUG_KMS("edid retrieved %p\n", edid);
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+
+ return ret;
+}
+
+static int radeon_dp_mst_get_modes(struct drm_connector *connector)
+{
+ return radeon_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+radeon_dp_mst_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO - validate mode against available PBN for link */
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
+
+struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ return &radeon_connector->mst_encoder->base;
+}
+
+static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
+ .get_modes = radeon_dp_mst_get_modes,
+ .mode_valid = radeon_dp_mst_mode_valid,
+ .best_encoder = radeon_mst_best_encoder,
+};
+
+static enum drm_connector_status
+radeon_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
+}
+
+static void
+radeon_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_encoder *radeon_encoder = radeon_connector->mst_encoder;
+
+ drm_encoder_cleanup(&radeon_encoder->base);
+ kfree(radeon_encoder);
+ drm_connector_cleanup(connector);
+ kfree(radeon_connector);
+}
+
+static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
+ .dpms = radeon_connector_dpms,
+ .detect = radeon_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = radeon_dp_mst_connector_destroy,
+};
+
+static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
+{
+ struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector;
+ struct drm_connector *connector;
+
+ radeon_connector = kzalloc(sizeof(*radeon_connector), GFP_KERNEL);
+ if (!radeon_connector)
+ return NULL;
+
+ radeon_connector->is_mst_connector = true;
+ connector = &radeon_connector->base;
+ radeon_connector->port = port;
+ radeon_connector->mst_port = master;
+ DRM_DEBUG_KMS("\n");
+
+ drm_connector_init(dev, connector, &radeon_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &radeon_dp_mst_connector_helper_funcs);
+ radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+
+ mutex_lock(&dev->mode_config.mutex);
+ radeon_fb_add_connector(rdev, connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_connector_register(connector);
+ return connector;
+}
+
+static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ drm_connector_unregister(connector);
+ /* need to nuke the connector */
+ mutex_lock(&dev->mode_config.mutex);
+ /* dpms off */
+ radeon_fb_remove_connector(rdev, connector);
+
+ drm_connector_cleanup(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_reinit_primary_mode_group(dev);
+
+ kfree(connector);
+ DRM_DEBUG_KMS("\n");
+}
+
+static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+struct drm_dp_mst_topology_cbs mst_cbs = {
+ .add_connector = radeon_dp_add_mst_connector,
+ .destroy_connector = radeon_dp_destroy_mst_connector,
+ .hotplug = radeon_dp_mst_hotplug,
+};
+
+struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (!connector->encoder)
+ continue;
+ if (!radeon_connector->is_mst_connector)
+ continue;
+
+ DRM_DEBUG_KMS("checking %p vs %p\n", connector->encoder, encoder);
+ if (connector->encoder == encoder)
+ return radeon_connector;
+ }
+ return NULL;
+}
+
+void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder);
+ struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
+ struct radeon_connector *radeon_connector = radeon_mst_find_connector(&radeon_encoder->base);
+ int dp_clock;
+ struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
+
+ if (radeon_connector) {
+ radeon_connector->pixelclock_for_modeset = mode->clock;
+ if (radeon_connector->base.display_info.bpc)
+ radeon_crtc->bpc = radeon_connector->base.display_info.bpc;
+ else
+ radeon_crtc->bpc = 8;
+ }
+
+ DRM_DEBUG_KMS("dp_clock %p %d\n", dig_connector, dig_connector->dp_clock);
+ dp_clock = dig_connector->dp_clock;
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
+ ASIC_INTERNAL_SS_ON_DP,
+ dp_clock);
+}
+
+static void
+radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder, *primary;
+ struct radeon_encoder_mst *mst_enc;
+ struct radeon_encoder_atom_dig *dig_enc;
+ struct radeon_connector *radeon_connector;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ int ret, slots;
+
+ if (!ASIC_IS_DCE5(rdev)) {
+ DRM_ERROR("got mst dpms on non-DCE5\n");
+ return;
+ }
+
+ radeon_connector = radeon_mst_find_connector(encoder);
+ if (!radeon_connector)
+ return;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ mst_enc = radeon_encoder->enc_priv;
+
+ primary = mst_enc->primary;
+
+ dig_enc = primary->enc_priv;
+
+ crtc = encoder->crtc;
+ DRM_DEBUG_KMS("got connector %d\n", dig_enc->active_mst_links);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ dig_enc->active_mst_links++;
+
+ radeon_crtc = to_radeon_crtc(crtc);
+
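+ /* the first active link brings up the primary DIG backend and trains the link; additional streams only pick a spare frontend */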
+ if (dig_enc->active_mst_links == 1) {
+ mst_enc->fe = dig_enc->dig_encoder;
+ mst_enc->fe_from_be = true;
+ atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
+
+ atombios_dig_encoder_setup(&primary->base, ATOM_ENCODER_CMD_SETUP, 0);
+ atombios_dig_transmitter_setup2(&primary->base, ATOM_TRANSMITTER_ACTION_ENABLE,
+ 0, 0, dig_enc->dig_encoder);
+
+ if (radeon_dp_needs_link_train(mst_enc->connector) ||
+ dig_enc->active_mst_links == 1) {
+ radeon_dp_link_train(&primary->base, &mst_enc->connector->base);
+ }
+
+ } else {
+ mst_enc->fe = radeon_atom_pick_dig_encoder(encoder, radeon_crtc->crtc_id);
+ if (mst_enc->fe == -1)
+ DRM_ERROR("failed to get frontend for dig encoder\n");
+ mst_enc->fe_from_be = false;
+ atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
+ }
+
+ DRM_DEBUG_KMS("dig encoder is %d %d %d\n", dig_enc->dig_encoder,
+ dig_enc->linkb, radeon_crtc->crtc_id);
+
+ ret = drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
+ radeon_connector->port,
+ mst_enc->pbn, &slots);
+ ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+
+ radeon_dp_mst_set_be_cntl(primary, mst_enc,
+ radeon_connector->mst_port->hpd.hpd, true);
+
+ mst_enc->enc_active = true;
+ radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
+ radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+ atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
+ mst_enc->fe);
+ ret = drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
+
+ ret = drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
+
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ DRM_ERROR("DPMS OFF %d\n", dig_enc->active_mst_links);
+
+ if (!mst_enc->enc_active)
+ return;
+
+ drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
+ ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+
+ drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
+ /* and this can also fail */
+ drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
+
+ drm_dp_mst_deallocate_vcpi(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
+
+ mst_enc->enc_active = false;
+ radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
+
+ radeon_dp_mst_set_be_cntl(primary, mst_enc,
+ radeon_connector->mst_port->hpd.hpd, false);
+ atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0,
+ mst_enc->fe);
+
+ if (!mst_enc->fe_from_be)
+ radeon_atom_release_dig_encoder(rdev, mst_enc->fe);
+
+ mst_enc->fe_from_be = false;
+ dig_enc->active_mst_links--;
+ if (dig_enc->active_mst_links == 0) {
+ /* drop link */
+ }
+
+ break;
+ }
+
+}
+
+static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder_mst *mst_enc;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int bpp = 24;
+
+ mst_enc = radeon_encoder->enc_priv;
+
+ mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+ mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
+ DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+ mst_enc->primary->active_device, mst_enc->primary->devices,
+ mst_enc->connector->devices, mst_enc->primary->base.encoder_type);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ {
+ struct radeon_connector_atom_dig *dig_connector;
+
+ dig_connector = mst_enc->connector->con_priv;
+ dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+ dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
+ dig_connector->dpcd);
+ DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ dig_connector->dp_lane_count, dig_connector->dp_clock);
+ }
+ return true;
+}
+
+static void radeon_mst_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_connector *radeon_connector;
+ struct radeon_encoder *radeon_encoder, *primary;
+ struct radeon_encoder_mst *mst_enc;
+ struct radeon_encoder_atom_dig *dig_enc;
+
+ radeon_connector = radeon_mst_find_connector(encoder);
+ if (!radeon_connector) {
+ DRM_DEBUG_KMS("failed to find connector %p\n", encoder);
+ return;
+ }
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ mst_enc = radeon_encoder->enc_priv;
+
+ primary = mst_enc->primary;
+
+ dig_enc = primary->enc_priv;
+
+ mst_enc->port = radeon_connector->port;
+
+ if (dig_enc->dig_encoder == -1) {
+ dig_enc->dig_encoder = radeon_atom_pick_dig_encoder(&primary->base, -1);
+ primary->offset = radeon_atom_set_enc_offset(dig_enc->dig_encoder);
+ atombios_set_mst_encoder_crtc_source(encoder, dig_enc->dig_encoder);
+ }
+ DRM_DEBUG_KMS("%d %d\n", dig_enc->dig_encoder, primary->offset);
+}
+
+static void
+radeon_mst_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static void radeon_mst_encoder_commit(struct drm_encoder *encoder)
+{
+ radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ DRM_DEBUG_KMS("\n");
+}
+
+static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
+ .dpms = radeon_mst_encoder_dpms,
+ .mode_fixup = radeon_mst_mode_fixup,
+ .prepare = radeon_mst_encoder_prepare,
+ .mode_set = radeon_mst_encoder_mode_set,
+ .commit = radeon_mst_encoder_commit,
+};
+
+void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs radeon_dp_mst_enc_funcs = {
+ .destroy = radeon_dp_mst_encoder_destroy,
+};
+
+static struct radeon_encoder *
+radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_mst *mst_enc;
+ struct drm_encoder *encoder;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->base.helper_private;
+ struct drm_encoder *enc_master = connector_funcs->best_encoder(&connector->base);
+
+ DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+ radeon_encoder = kzalloc(sizeof(*radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return NULL;
+
+ radeon_encoder->enc_priv = kzalloc(sizeof(*mst_enc), GFP_KERNEL);
+ if (!radeon_encoder->enc_priv) {
+ kfree(radeon_encoder);
+ return NULL;
+ }
+ encoder = &radeon_encoder->base;
+ switch (rdev->num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
+
+ drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
+ DRM_MODE_ENCODER_DPMST);
+ drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);
+
+ mst_enc = radeon_encoder->enc_priv;
+ mst_enc->connector = connector;
+ mst_enc->primary = to_radeon_encoder(enc_master);
+ radeon_encoder->is_mst_encoder = true;
+ return radeon_encoder;
+}
+
+int
+radeon_dp_mst_init(struct radeon_connector *radeon_connector)
+{
+ struct drm_device *dev = radeon_connector->base.dev;
+
+ if (!radeon_connector->ddc_bus->has_aux)
+ return 0;
+
+ radeon_connector->mst_mgr.cbs = &mst_cbs;
+ return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev->dev,
+ &radeon_connector->ddc_bus->aux, 16, 6,
+ radeon_connector->base.base.id);
+}
+
+int
+radeon_dp_mst_probe(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int ret;
+ u8 msg[1];
+
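+ /* MST requires a DPCD 1.2 capable sink */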
+ if (dig_connector->dpcd[DP_DPCD_REV] < 0x12)
+ return 0;
+
+ ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_MSTM_CAP, msg,
+ 1);
+ if (ret) {
+ if (msg[0] & DP_MST_CAP) {
+ DRM_DEBUG_KMS("Sink is MST capable\n");
+ dig_connector->is_mst = true;
+ } else {
+ DRM_DEBUG_KMS("Sink is not MST capable\n");
+ dig_connector->is_mst = false;
+ }
+
+ }
+ drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
+ dig_connector->is_mst);
+ return dig_connector->is_mst;
+}
+
+int
+radeon_dp_mst_check_status(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int retry;
+
+ if (dig_connector->is_mst) {
+ u8 esi[16] = { 0 };
+ int dret;
+ int ret = 0;
+ bool handled;
+
+ dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
+ DP_SINK_COUNT_ESI, esi, 8);
+go_again:
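+ /* service and ack ESI events until the sink reports nothing further pending */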
+ if (dret == 8) {
+ DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ ret = drm_dp_mst_hpd_irq(&radeon_connector->mst_mgr, esi, &handled);
+
+ if (handled) {
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+ wret = drm_dp_dpcd_write(&radeon_connector->ddc_bus->aux,
+ DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ if (wret == 3)
+ break;
+ }
+
+ dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
+ DP_SINK_COUNT_ESI, esi, 8);
+ if (dret == 8) {
+ DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ goto go_again;
+ }
+ } else
+ ret = 0;
+
+ return ret;
+ } else {
+ DRM_DEBUG_KMS("failed to get ESI - device may have failed %d\n", ret);
+ dig_connector->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
+ dig_connector->is_mst);
+ /* send a hotplug event */
+ }
+ }
+ return -EINVAL;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_mst_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+ int i;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ radeon_connector = to_radeon_connector(connector);
+ dig_connector = radeon_connector->con_priv;
+ if (radeon_connector->is_mst_connector)
+ continue;
+ if (!dig_connector->is_mst)
+ continue;
+ drm_dp_mst_dump_topology(m, &radeon_connector->mst_mgr);
+
+ for (i = 0; i < radeon_connector->enabled_attribs; i++)
+ seq_printf(m, "attrib %d: %d %d\n", i,
+ radeon_connector->cur_stream_attribs[i].fe,
+ radeon_connector->cur_stream_attribs[i].slots);
+ }
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_mst_list[] = {
+ {"radeon_mst_info", &radeon_debugfs_mst_info, 0, NULL},
+};
+#endif
+
+int radeon_mst_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_mst_list, 1);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 5d684beb48d3..7d620d4b3f31 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -89,9 +89,10 @@
* 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
* CS to GPU on >= r600
* 2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support
+ * 2.42.0 - Add VCE/VUI (Video Usability Information) support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 41
+#define KMS_DRIVER_MINOR 42
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -190,6 +191,8 @@ int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
int radeon_backlight = -1;
+int radeon_auxch = -1;
+int radeon_mst = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -239,7 +242,7 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, radeon_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
@@ -275,6 +278,12 @@ module_param_named(bapm, radeon_bapm, int, 0444);
MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(backlight, radeon_backlight, int, 0444);
+MODULE_PARM_DESC(auxch, "Experimental native auxch support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(auxch, radeon_auxch, int, 0444);
+
+MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)");
+module_param_named(mst, radeon_mst, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3a297037cc17..ef99917f000d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -247,7 +247,16 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->active_device & radeon_connector->devices)
+ if (radeon_encoder->is_mst_encoder) {
+ struct radeon_encoder_mst *mst_enc;
+
+ if (!radeon_connector->is_mst_connector)
+ continue;
+
+ mst_enc = radeon_encoder->enc_priv;
+ if (mst_enc->connector == radeon_connector->mst_port)
+ return connector;
+ } else if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -393,6 +402,9 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_DisplayPort:
+ if (radeon_connector->is_mst_connector)
+ return false;
+
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ea276ff6d174..aeb676708e60 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
}
info->par = rfbdev;
+ info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
@@ -434,3 +435,13 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
return true;
return false;
}
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
+
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 00fc59762e0d..7162c935371c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -87,6 +87,20 @@ static void radeon_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
+static void radeon_dp_work_func(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ dp_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ /* this should take a mutex */
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ }
+}
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
@@ -276,6 +290,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
}
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
rdev->irq.installed = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 122eb5693ba1..3db23007cdf4 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -103,15 +103,14 @@ static const struct kgd2kfd_calls *kgd2kfd;
bool radeon_kfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
- bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
- const struct kgd2kfd_calls**);
+ bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init);
if (kgd2kfd_init_p == NULL)
return false;
- if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
symbol_put(kgd2kfd_init);
kgd2kfd = NULL;
@@ -120,7 +119,7 @@ bool radeon_kfd_init(void)
return true;
#elif defined(CONFIG_HSA_AMD)
- if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
kgd2kfd = NULL;
return false;
@@ -143,7 +142,8 @@ void radeon_kfd_fini(void)
void radeon_kfd_device_probe(struct radeon_device *rdev)
{
if (kgd2kfd)
- rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, rdev->pdev);
+ rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+ rdev->pdev, &kfd2kgd);
}
void radeon_kfd_device_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 686411e4e4f6..7b2a7335cc5d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -547,6 +547,35 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
else
*value = 1;
break;
+ case RADEON_INFO_CURRENT_GPU_TEMP:
+ /* get temperature in millidegrees C */
+ if (rdev->asic->pm.get_temperature)
+ *value = radeon_get_temperature(rdev);
+ else
+ *value = 0;
+ break;
+ case RADEON_INFO_CURRENT_GPU_SCLK:
+ /* get sclk in MHz */
+ if (rdev->pm.dpm_enabled)
+ *value = radeon_dpm_get_current_sclk(rdev) / 100;
+ else
+ *value = rdev->pm.current_sclk / 100;
+ break;
+ case RADEON_INFO_CURRENT_GPU_MCLK:
+ /* get mclk in MHz */
+ if (rdev->pm.dpm_enabled)
+ *value = radeon_dpm_get_current_mclk(rdev) / 100;
+ else
+ *value = rdev->pm.current_mclk / 100;
+ break;
+ case RADEON_INFO_READ_REG:
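+ /* userspace passes a register offset in; only whitelisted status registers may be read back */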
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (radeon_get_allowed_info_register(rdev, *value, value))
+ return -EINVAL;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index c89971d904c3..45715307db71 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -36,7 +36,7 @@
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 572b4dbec186..01701376b239 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -53,6 +53,11 @@ struct radeon_mn {
struct rb_root objects;
};
+struct radeon_mn_node {
+ struct interval_tree_node it;
+ struct list_head bos;
+};
+
/**
* radeon_mn_destroy - destroy the rmn
*
@@ -64,14 +69,21 @@ static void radeon_mn_destroy(struct work_struct *work)
{
struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
struct radeon_device *rdev = rmn->rdev;
- struct radeon_bo *bo, *next;
+ struct radeon_mn_node *node, *next_node;
+ struct radeon_bo *bo, *next_bo;
mutex_lock(&rdev->mn_lock);
mutex_lock(&rmn->lock);
hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
- interval_tree_remove(&bo->mn_it, &rmn->objects);
- bo->mn = NULL;
+ rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
+ it.rb) {
+
+ interval_tree_remove(&node->it, &rmn->objects);
+ list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+ bo->mn = NULL;
+ list_del_init(&bo->mn_list);
+ }
+ kfree(node);
}
mutex_unlock(&rmn->lock);
mutex_unlock(&rdev->mn_lock);
@@ -121,29 +133,33 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
+ struct radeon_mn_node *node;
struct radeon_bo *bo;
int r;
- bo = container_of(it, struct radeon_bo, mn_it);
+ node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, start, end);
- r = radeon_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%d) failed to reserve user bo\n", r);
- continue;
- }
+ list_for_each_entry(bo, &node->bos, mn_list) {
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
- false, MAX_SCHEDULE_TIMEOUT);
- if (r)
- DRM_ERROR("(%d) failed to wait for user bo\n", r);
+ r = radeon_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%d) failed to reserve user bo\n", r);
+ continue;
+ }
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%d) failed to validate user bo\n", r);
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false, MAX_SCHEDULE_TIMEOUT);
+ if (r)
+ DRM_ERROR("(%d) failed to wait for user bo\n", r);
- radeon_bo_unreserve(bo);
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (r)
+ DRM_ERROR("(%d) failed to validate user bo\n", r);
+
+ radeon_bo_unreserve(bo);
+ }
}
mutex_unlock(&rmn->lock);
@@ -220,24 +236,44 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
unsigned long end = addr + radeon_bo_size(bo) - 1;
struct radeon_device *rdev = bo->rdev;
struct radeon_mn *rmn;
+ struct radeon_mn_node *node = NULL;
+ struct list_head bos;
struct interval_tree_node *it;
rmn = radeon_mn_get(rdev);
if (IS_ERR(rmn))
return PTR_ERR(rmn);
+ INIT_LIST_HEAD(&bos);
+
mutex_lock(&rmn->lock);
- it = interval_tree_iter_first(&rmn->objects, addr, end);
- if (it) {
- mutex_unlock(&rmn->lock);
- return -EEXIST;
+ while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
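+ /* an existing node overlaps the new range: absorb it, widening the range and splicing over its BO list */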
+ kfree(node);
+ node = container_of(it, struct radeon_mn_node, it);
+ interval_tree_remove(&node->it, &rmn->objects);
+ addr = min(it->start, addr);
+ end = max(it->last, end);
+ list_splice(&node->bos, &bos);
+ }
+
+ if (!node) {
+ node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
+ if (!node) {
+ mutex_unlock(&rmn->lock);
+ return -ENOMEM;
+ }
}
bo->mn = rmn;
- bo->mn_it.start = addr;
- bo->mn_it.last = end;
- interval_tree_insert(&bo->mn_it, &rmn->objects);
+
+ node->it.start = addr;
+ node->it.last = end;
+ INIT_LIST_HEAD(&node->bos);
+ list_splice(&bos, &node->bos);
+ list_add(&bo->mn_list, &node->bos);
+
+ interval_tree_insert(&node->it, &rmn->objects);
mutex_unlock(&rmn->lock);
@@ -255,6 +291,7 @@ void radeon_mn_unregister(struct radeon_bo *bo)
{
struct radeon_device *rdev = bo->rdev;
struct radeon_mn *rmn;
+ struct list_head *head;
mutex_lock(&rdev->mn_lock);
rmn = bo->mn;
@@ -264,8 +301,19 @@ void radeon_mn_unregister(struct radeon_bo *bo)
}
mutex_lock(&rmn->lock);
- interval_tree_remove(&bo->mn_it, &rmn->objects);
+ /* save the next list entry for later */
+ head = bo->mn_list.next;
+
bo->mn = NULL;
+ list_del(&bo->mn_list);
+
+ if (list_empty(head)) {
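+ /* that was the node's last BO, so drop the interval node itself */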
+ struct radeon_mn_node *node;
+ node = container_of(head, struct radeon_mn_node, bos);
+ interval_tree_remove(&node->it, &rmn->objects);
+ kfree(node);
+ }
+
mutex_unlock(&rmn->lock);
mutex_unlock(&rdev->mn_lock);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 920a8be8abad..fa91a17b81b6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <linux/i2c.h>
@@ -85,6 +86,13 @@ enum radeon_hpd_id {
RADEON_HPD_NONE = 0xff,
};
+enum radeon_output_csc {
+ RADEON_OUTPUT_CSC_BYPASS = 0,
+ RADEON_OUTPUT_CSC_TVRGB = 1,
+ RADEON_OUTPUT_CSC_YCBCR601 = 2,
+ RADEON_OUTPUT_CSC_YCBCR709 = 3,
+};
+
#define RADEON_MAX_I2C_BUS 16
/* radeon gpio-based i2c
@@ -255,6 +263,8 @@ struct radeon_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* Output CSC */
+ struct drm_property *output_csc_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -265,6 +275,9 @@ struct radeon_mode_info {
u16 firmware_flags;
/* pointer to backlight encoder */
struct radeon_encoder *bl_encoder;
+
+ /* bitmask for active encoder frontends */
+ uint32_t active_encoders;
};
#define RADEON_MAX_BL_LEVEL 0xFF
@@ -357,6 +370,7 @@ struct radeon_crtc {
u32 wm_low;
u32 wm_high;
struct drm_display_mode hw_mode;
+ enum radeon_output_csc output_csc;
};
struct radeon_encoder_primary_dac {
@@ -426,12 +440,24 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
+ int active_mst_links;
};
struct radeon_encoder_atom_dac {
enum radeon_tv_std tv_std;
};
+struct radeon_encoder_mst {
+ int crtc;
+ struct radeon_encoder *primary;
+ struct radeon_connector *connector;
+ struct drm_dp_mst_port *port;
+ int pbn;
+ int fe;
+ bool fe_from_be;
+ bool enc_active;
+};
+
struct radeon_encoder {
struct drm_encoder base;
uint32_t encoder_enum;
@@ -450,6 +476,11 @@ struct radeon_encoder {
bool is_ext_encoder;
u16 caps;
struct radeon_audio_funcs *audio;
+ enum radeon_output_csc output_csc;
+ bool can_mst;
+ uint32_t offset;
+ bool is_mst_encoder;
+ /* front end for this mst encoder */
};
struct radeon_connector_atom_dig {
@@ -460,6 +491,7 @@ struct radeon_connector_atom_dig {
int dp_clock;
int dp_lane_count;
bool edp_on;
+ bool is_mst;
};
struct radeon_gpio_rec {
@@ -503,6 +535,11 @@ enum radeon_connector_dither {
RADEON_FMT_DITHER_ENABLE = 1,
};
+struct stream_attribs {
+ uint16_t fe;
+ uint16_t slots;
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -524,6 +561,14 @@ struct radeon_connector {
enum radeon_connector_audio audio;
enum radeon_connector_dither dither;
int pixelclock_for_modeset;
+ bool is_mst_connector;
+ struct radeon_connector *mst_port;
+ struct drm_dp_mst_port *port;
+ struct drm_dp_mst_topology_mgr mst_mgr;
+
+ struct radeon_encoder *mst_encoder;
+ struct stream_attribs cur_stream_attribs[6];
+ int enabled_attribs;
};
struct radeon_framebuffer {
@@ -708,15 +753,26 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
+int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+ u8 *dpcd);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
+extern ssize_t
+radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg);
+
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+extern void atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
+extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder,
+ int action, uint8_t lane_num,
+ uint8_t lane_set, int fe);
+extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder,
+ int fe);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
@@ -929,7 +985,23 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
+
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
+
+/* mst */
+int radeon_dp_mst_init(struct radeon_connector *radeon_connector);
+int radeon_dp_mst_probe(struct radeon_connector *radeon_connector);
+int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector);
+int radeon_mst_debugfs_init(struct radeon_device *rdev);
+void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode);
+
+void radeon_setup_mst_connector(struct drm_device *dev);
+
+int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx);
+void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 976fe432f4e2..24f849f888bb 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -571,6 +571,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
case 0x04000005: // rate control
case 0x04000007: // motion estimation
case 0x04000008: // rdo
+ case 0x04000009: // vui
break;
case 0x03000001: // encode
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 9031f4b69824..cb0afe78abed 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -1001,6 +1001,28 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
ps->sclk_high, ps->max_voltage);
}
+/* get the current sclk in 10 kHz units */
+u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev)
+{
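+ /* reconstruct sclk from the SPLL feedback, reference and post dividers */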
+ u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK;
+ u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+ u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1;
+ u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 +
+ ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1;
+ u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) /
+ (post_div * ref_div);
+
+ return sclk;
+}
+
+/* get the current mclk in 10 kHz units */
+u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct igp_power_info *pi = rs780_get_pi(rdev);
+
+ return pi->bootup_uma_clk;
+}
+
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 6a5c233361e9..97e5a6f1ce58 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -2050,6 +2050,52 @@ void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
}
}
+/* get the current sclk in 10 kHz units */
+u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+ struct rv6xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->sclk;
+ }
+}
+
+/* get the current mclk in 10 kHz units */
+u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+ struct rv6xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->mclk;
+ }
+}
+
void rv6xx_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 306732641b23..b9c770745a7a 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2492,6 +2492,50 @@ void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
}
}
+u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->sclk;
+ }
+}
+
+u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->mclk;
+ }
+}
+
void rv770_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a7fb2735d4a9..b1d74bc375d8 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1264,6 +1264,36 @@ static void si_init_golden_registers(struct radeon_device *rdev)
}
}
+/**
+ * si_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int si_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS2:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+ case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
@@ -6055,12 +6085,12 @@ int si_irq_set(struct radeon_device *rdev)
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
if (!ASIC_IS_NODCE(rdev)) {
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
}
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -6123,27 +6153,27 @@ int si_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("si_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("si_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("si_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("si_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("si_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("si_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -6306,6 +6336,37 @@ static inline void si_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
}
static void si_irq_disable(struct radeon_device *rdev)
@@ -6371,6 +6432,7 @@ int si_irq_process(struct radeon_device *rdev)
u32 src_id, src_data, ring_id;
u32 ring_index;
bool queue_hotplug = false;
+ bool queue_dp = false;
bool queue_thermal = false;
u32 status, addr;
@@ -6611,6 +6673,48 @@ restart_ih:
DRM_DEBUG("IH: HPD6\n");
}
break;
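+ /* src_id 6-11 are the HPD RX (DP sink irq) lines; queue the dp worker to service them */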
+ case 6:
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+ }
+ break;
+ case 7:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+ }
+ break;
+ case 8:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+ }
+ break;
+ case 9:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+ }
+ break;
+ case 10:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+ }
+ break;
+ case 11:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -6693,6 +6797,8 @@ restart_ih:
rptr &= rdev->ih.ptr_mask;
WREG32(IH_RB_RPTR, rptr);
}
+ if (queue_dp)
+ schedule_work(&rdev->dp_work);
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 7be11651b7e6..b35bccfeef79 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6993,3 +6993,39 @@ void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
}
+
+u32 si_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 si_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->mclk;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 99a9835c9f61..3afac3013983 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1556,6 +1556,7 @@
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
#define UVD_RBC_RB_RPTR 0xF690
#define UVD_RBC_RB_WPTR 0xF694
+#define UVD_STATUS 0xF6BC
#define UVD_CGC_CTRL 0xF4B0
# define DCM (1 << 0)
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 25fd4ced36c8..cd0862809adf 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1837,6 +1837,34 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
}
}
+u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
+ struct sumo_ps *ps = sumo_get_ps(rps);
+ struct sumo_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
+ CURR_INDEX_SHIFT;
+
+ if (current_index == BOOST_DPM_LEVEL) {
+ pl = &pi->boost_pl;
+ return pl->sclk;
+ } else if (current_index >= ps->num_levels) {
+ return 0;
+ } else {
+ pl = &ps->levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
void sumo_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 38dacb7a3689..a5b02c575d77 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1964,6 +1964,31 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
}
}
+u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
+ struct trinity_ps *ps = trinity_get_ps(rps);
+ struct trinity_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) >>
+ CURRENT_STATE_SHIFT;
+
+ if (current_index >= ps->num_levels) {
+ return 0;
+ } else {
+ pl = &ps->levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
void trinity_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 25c7a998fc2c..7d0b8ef9bea2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -15,6 +15,8 @@
#include <linux/mutex.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -99,9 +101,13 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
clk_disable_unprepare(rcrtc->clock);
}
+/* -----------------------------------------------------------------------------
+ * Hardware Setup
+ */
+
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
- const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+ const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
unsigned long mode_clock = mode->clock * 1000;
unsigned long clk;
u32 value;
@@ -187,9 +193,19 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
rcdu->dpad0_source = rcrtc->index;
}
-void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
+static unsigned int plane_zpos(struct rcar_du_plane *plane)
+{
+ return to_rcar_du_plane_state(plane->plane.state)->zpos;
+}
+
+static const struct rcar_du_format_info *
+plane_format(struct rcar_du_plane *plane)
+{
+ return to_rcar_du_plane_state(plane->plane.state)->format;
+}
+
+static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
unsigned int num_planes = 0;
unsigned int prio = 0;
@@ -201,29 +217,30 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
unsigned int j;
- if (plane->crtc != &rcrtc->crtc || !plane->enabled)
+ if (plane->plane.state->crtc != &rcrtc->crtc)
continue;
/* Insert the plane in the sorted planes array. */
for (j = num_planes++; j > 0; --j) {
- if (planes[j-1]->zpos <= plane->zpos)
+ if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
break;
planes[j] = planes[j-1];
}
planes[j] = plane;
- prio += plane->format->planes * 4;
+ prio += plane_format(plane)->planes * 4;
}
for (i = 0; i < num_planes; ++i) {
struct rcar_du_plane *plane = planes[i];
- unsigned int index = plane->hwindex;
+ struct drm_plane_state *state = plane->plane.state;
+ unsigned int index = to_rcar_du_plane_state(state)->hwindex;
prio -= 4;
dspr |= (index + 1) << prio;
dptsr |= DPTSR_PnDK(index) | DPTSR_PnTS(index);
- if (plane->format->planes == 2) {
+ if (plane_format(plane)->planes == 2) {
index = (index + 1) % 8;
prio -= 4;
@@ -236,8 +253,6 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
* with superposition controller 2.
*/
if (rcrtc->index % 2) {
- u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
-
/* The DPTSR register is updated when the display controller is
* stopped. We thus need to restart the DU. Once again, sorry
* for the flicker. One way to mitigate the issue would be to
@@ -245,29 +260,104 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
* split, or through a module parameter). Flicker would then
* occur only if we need to break the pre-association.
*/
- if (value != dptsr) {
+ mutex_lock(&rcrtc->group->lock);
+ if (rcar_du_group_read(rcrtc->group, DPTSR) != dptsr) {
rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
if (rcrtc->group->used_crtcs)
rcar_du_group_restart(rcrtc->group);
}
+ mutex_unlock(&rcrtc->group->lock);
}
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
dspr);
}
+/* -----------------------------------------------------------------------------
+ * Page Flip
+ */
+
+void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
+ struct drm_file *file)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ /* Destroy the pending vertical blanking event associated with the
+ * pending page flip, if any, and disable vertical blanking interrupts.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = rcrtc->event;
+ if (event && event->base.file_priv == file) {
+ rcrtc->event = NULL;
+ event->base.destroy(&event->base);
+ drm_crtc_vblank_put(&rcrtc->crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = rcrtc->event;
+ rcrtc->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (event == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_send_vblank_event(dev, rcrtc->index, event);
+ wake_up(&rcrtc->flip_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ drm_crtc_vblank_put(&rcrtc->crtc);
+}
+
+static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
+{
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+ bool pending;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = rcrtc->event != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
+}
+
+static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
+{
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+
+ if (wait_event_timeout(rcrtc->flip_wait,
+ !rcar_du_crtc_page_flip_pending(rcrtc),
+ msecs_to_jiffies(50)))
+ return;
+
+ dev_warn(rcdu->dev, "page flip timeout\n");
+
+ rcar_du_crtc_finish_page_flip(rcrtc);
+}
+
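/*
 * The 50ms timeout above reads as a safety net rather than a tuned value:
 * at common refresh rates a flip completes within one vblank (roughly
 * 16-20ms at 60-50Hz), so reaching the timeout indicates a missed
 * interrupt. Forcing completion through rcar_du_crtc_finish_page_flip()
 * then keeps userspace from blocking forever on an event that would
 * otherwise never be sent.
 */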
+/* -----------------------------------------------------------------------------
+ * Start/Stop and Suspend/Resume
+ */
+
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
bool interlaced;
- unsigned int i;
if (rcrtc->started)
return;
- if (WARN_ON(rcrtc->plane->format == NULL))
- return;
-
/* Set display off and background to black */
rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
@@ -276,20 +366,8 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
rcar_du_crtc_set_display_timing(rcrtc);
rcar_du_group_set_routing(rcrtc->group);
- mutex_lock(&rcrtc->group->planes.lock);
- rcrtc->plane->enabled = true;
- rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcrtc->group->planes.lock);
-
- /* Setup planes. */
- for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
-
- if (plane->crtc != crtc || !plane->enabled)
- continue;
-
- rcar_du_plane_setup(plane);
- }
+ /* Start with all planes disabled. */
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
/* Select master sync mode. This enables display operation in master
* sync mode (with the HSYNC and VSYNC signals configured as outputs and
@@ -302,6 +380,9 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
rcar_du_group_start_stop(rcrtc->group, true);
+ /* Turn vertical blanking interrupt reporting back on. */
+ drm_crtc_vblank_on(crtc);
+
rcrtc->started = true;
}
@@ -312,10 +393,12 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
if (!rcrtc->started)
return;
- mutex_lock(&rcrtc->group->planes.lock);
- rcrtc->plane->enabled = false;
- rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcrtc->group->planes.lock);
+ /* Disable vertical blanking interrupt reporting. We first need to wait
+ * for page flip completion before stopping the CRTC as userspace
+ * expects page flips to eventually complete.
+ */
+ rcar_du_crtc_wait_page_flip(rcrtc);
+ drm_crtc_vblank_off(crtc);
/* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
@@ -335,196 +418,109 @@ void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
{
- if (rcrtc->dpms != DRM_MODE_DPMS_ON)
+ unsigned int i;
+
+ if (!rcrtc->enabled)
return;
rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
-}
-
-static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
-{
- struct drm_crtc *crtc = &rcrtc->crtc;
-
- rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
- rcar_du_plane_update_base(rcrtc->plane);
-}
-
-static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ /* Commit the planes state. */
+ for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+ struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
- if (rcrtc->dpms == mode)
- return;
+ if (plane->plane.state->crtc != &rcrtc->crtc)
+ continue;
- if (mode == DRM_MODE_DPMS_ON) {
- rcar_du_crtc_get(rcrtc);
- rcar_du_crtc_start(rcrtc);
- } else {
- rcar_du_crtc_stop(rcrtc);
- rcar_du_crtc_put(rcrtc);
+ rcar_du_plane_setup(plane);
}
- rcrtc->dpms = mode;
+ rcar_du_crtc_update_planes(rcrtc);
}
-static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* TODO Fixup modes */
- return true;
-}
+/* -----------------------------------------------------------------------------
+ * CRTC Functions
+ */
-static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
+static void rcar_du_crtc_enable(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- /* We need to access the hardware during mode set, acquire a reference
- * to the CRTC.
- */
- rcar_du_crtc_get(rcrtc);
+ if (rcrtc->enabled)
+ return;
- /* Stop the CRTC and release the plane. Force the DPMS mode to off as a
- * result.
- */
- rcar_du_crtc_stop(rcrtc);
- rcar_du_plane_release(rcrtc->plane);
+ rcar_du_crtc_get(rcrtc);
+ rcar_du_crtc_start(rcrtc);
- rcrtc->dpms = DRM_MODE_DPMS_OFF;
+ rcrtc->enabled = true;
}
-static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void rcar_du_crtc_disable(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
- const struct rcar_du_format_info *format;
- int ret;
-
- format = rcar_du_format_info(crtc->primary->fb->pixel_format);
- if (format == NULL) {
- dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n",
- crtc->primary->fb->pixel_format);
- ret = -EINVAL;
- goto error;
- }
- ret = rcar_du_plane_reserve(rcrtc->plane, format);
- if (ret < 0)
- goto error;
-
- rcrtc->plane->format = format;
-
- rcrtc->plane->src_x = x;
- rcrtc->plane->src_y = y;
- rcrtc->plane->width = mode->hdisplay;
- rcrtc->plane->height = mode->vdisplay;
+ if (!rcrtc->enabled)
+ return;
- rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
+ rcar_du_crtc_stop(rcrtc);
+ rcar_du_crtc_put(rcrtc);
+ rcrtc->enabled = false;
rcrtc->outputs = 0;
-
- return 0;
-
-error:
- /* There's no rollback/abort operation to clean up in case of error. We
- * thus need to release the reference to the CRTC acquired in prepare()
- * here.
- */
- rcar_du_crtc_put(rcrtc);
- return ret;
}
-static void rcar_du_crtc_mode_commit(struct drm_crtc *crtc)
+static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
-
- /* We're done, restart the CRTC and set the DPMS mode to on. The
- * reference to the DU acquired at prepare() time will thus be released
- * by the DPMS handler (possibly called by the disable() handler).
- */
- rcar_du_crtc_start(rcrtc);
- rcrtc->dpms = DRM_MODE_DPMS_ON;
+ /* TODO Fixup modes */
+ return true;
}
-static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
{
+ struct drm_pending_vblank_event *event = crtc->state->event;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
- rcrtc->plane->src_x = x;
- rcrtc->plane->src_y = y;
-
- rcar_du_crtc_update_base(rcrtc);
+ if (event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- return 0;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ rcrtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
-static void rcar_du_crtc_disable(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- rcar_du_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- rcar_du_plane_release(rcrtc->plane);
+ rcar_du_crtc_update_planes(rcrtc);
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
- .dpms = rcar_du_crtc_dpms,
.mode_fixup = rcar_du_crtc_mode_fixup,
- .prepare = rcar_du_crtc_mode_prepare,
- .commit = rcar_du_crtc_mode_commit,
- .mode_set = rcar_du_crtc_mode_set,
- .mode_set_base = rcar_du_crtc_mode_set_base,
.disable = rcar_du_crtc_disable,
+ .enable = rcar_du_crtc_enable,
+ .atomic_begin = rcar_du_crtc_atomic_begin,
+ .atomic_flush = rcar_du_crtc_atomic_flush,
};
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- /* Destroy the pending vertical blanking event associated with the
- * pending page flip, if any, and disable vertical blanking interrupts.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- event = rcrtc->event;
- if (event && event->base.file_priv == file) {
- rcrtc->event = NULL;
- event->base.destroy(&event->base);
- drm_vblank_put(dev, rcrtc->index);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- event = rcrtc->event;
- rcrtc->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- if (event == NULL)
- return;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, rcrtc->index, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+static const struct drm_crtc_funcs crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
- drm_vblank_put(dev, rcrtc->index);
-}
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
@@ -544,41 +540,9 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
return ret;
}
-static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (rcrtc->event != NULL) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return -EBUSY;
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- crtc->primary->fb = fb;
- rcar_du_crtc_update_base(rcrtc);
-
- if (event) {
- event->pipe = rcrtc->index;
- drm_vblank_get(dev, rcrtc->index);
- spin_lock_irqsave(&dev->event_lock, flags);
- rcrtc->event = event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- return 0;
-}
-
-static const struct drm_crtc_funcs crtc_funcs = {
- .destroy = drm_crtc_cleanup,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = rcar_du_crtc_page_flip,
-};
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
{
@@ -620,20 +584,24 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
return -EPROBE_DEFER;
}
+ init_waitqueue_head(&rcrtc->flip_wait);
+
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
- rcrtc->dpms = DRM_MODE_DPMS_OFF;
- rcrtc->plane = &rgrp->planes.planes[index % 2];
-
- rcrtc->plane->crtc = crtc;
+ rcrtc->enabled = false;
- ret = drm_crtc_init(rcdu->ddev, crtc, &crtc_funcs);
+ ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
+ &rgrp->planes.planes[index % 2].plane,
+ NULL, &crtc_funcs);
if (ret < 0)
return ret;
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+ /* Start with vertical blanking interrupt reporting disabled. */
+ drm_crtc_vblank_off(crtc);
+
/* Register the interrupt handler. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
irq = platform_get_irq(pdev, index);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index d2f89f7d2e5e..5d9aa9b33769 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,12 +15,12 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
+#include <linux/wait.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
struct rcar_du_group;
-struct rcar_du_plane;
struct rcar_du_crtc {
struct drm_crtc crtc;
@@ -32,11 +32,12 @@ struct rcar_du_crtc {
bool started;
struct drm_pending_vblank_event *event;
+ wait_queue_head_t flip_wait;
+
unsigned int outputs;
- int dpms;
+ bool enabled;
struct rcar_du_group *group;
- struct rcar_du_plane *plane;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
@@ -59,6 +60,5 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_route_output(struct drm_crtc *crtc,
enum rcar_du_output output);
-void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index e0d74f821416..da1216a73969 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/wait.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -163,6 +164,8 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
return -ENOMEM;
}
+ init_waitqueue_head(&rcdu->commit.wait);
+
rcdu->dev = &pdev->dev;
rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
: (void *)platform_get_device_id(pdev)->driver_data;
@@ -175,17 +178,19 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
if (IS_ERR(rcdu->mmio))
return PTR_ERR(rcdu->mmio);
- /* DRM/KMS objects */
- ret = rcar_du_modeset_init(rcdu);
+ /* Initialize vertical blanking interrupt handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
goto done;
}
- /* vblank handling */
- ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+ /* DRM/KMS objects */
+ ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize vblank\n");
+ dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
goto done;
}
@@ -247,7 +252,8 @@ static const struct file_operations rcar_du_fops = {
};
static struct drm_driver rcar_du_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
+ | DRIVER_ATOMIC,
.load = rcar_du_load,
.unload = rcar_du_unload,
.preclose = rcar_du_preclose,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index c5b9ea6a7eaa..c7c538dd2e68 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,6 +15,7 @@
#define __RCAR_DU_DRV_H__
#include <linux/kernel.h>
+#include <linux/wait.h>
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
@@ -64,6 +65,10 @@ struct rcar_du_device_info {
unsigned int num_lvds;
};
+#define RCAR_DU_MAX_CRTCS 3
+#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
+#define RCAR_DU_MAX_LVDS 2
+
struct rcar_du_device {
struct device *dev;
const struct rcar_du_device_info *info;
@@ -73,13 +78,18 @@ struct rcar_du_device {
struct drm_device *ddev;
struct drm_fbdev_cma *fbdev;
- struct rcar_du_crtc crtcs[3];
+ struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
- struct rcar_du_group groups[2];
+ struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
unsigned int dpad0_source;
- struct rcar_du_lvdsenc *lvds[2];
+ struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
+
+ struct {
+ wait_queue_head_t wait;
+ u32 pending;
+ } commit;
};
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 279167f783f6..d0ae1e8009c6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -42,46 +42,40 @@ rcar_du_connector_best_encoder(struct drm_connector *connector)
* Encoder
*/
-static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void rcar_du_encoder_disable(struct drm_encoder *encoder)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ if (renc->lvds)
+ rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false);
+}
+
+static void rcar_du_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
if (renc->lvds)
- rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
+ rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true);
}
-static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ const struct drm_display_mode *mode = &crtc_state->mode;
const struct drm_display_mode *panel_mode;
+ struct drm_connector *connector = conn_state->connector;
struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
- bool found = false;
/* DAC encoders currently have no restriction on the mode. */
if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
- return true;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_dbg(dev->dev, "mode_fixup: no connector found\n");
- return false;
- }
+ return 0;
if (list_empty(&connector->modes)) {
- dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
- return false;
+ dev_dbg(dev->dev, "encoder: empty modes list\n");
+ return -EINVAL;
}
panel_mode = list_first_entry(&connector->modes,
@@ -90,7 +84,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
/* We're not allowed to modify the resolution. */
if (mode->hdisplay != panel_mode->hdisplay ||
mode->vdisplay != panel_mode->vdisplay)
- return false;
+ return -EINVAL;
/* The flat panel mode is fixed, just copy it to the adjusted mode. */
drm_mode_copy(adjusted_mode, panel_mode);
@@ -102,25 +96,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->clock = clamp(adjusted_mode->clock,
30000, 150000);
- return true;
-}
-
-static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- if (renc->lvds)
- rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
- DRM_MODE_DPMS_OFF);
-}
-
-static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- if (renc->lvds)
- rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
- DRM_MODE_DPMS_ON);
+ return 0;
}
static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
@@ -133,11 +109,10 @@ static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_encoder_dpms,
- .mode_fixup = rcar_du_encoder_mode_fixup,
- .prepare = rcar_du_encoder_mode_prepare,
- .commit = rcar_du_encoder_mode_commit,
.mode_set = rcar_du_encoder_mode_set,
+ .disable = rcar_du_encoder_disable,
+ .enable = rcar_du_encoder_enable,
+ .atomic_check = rcar_du_encoder_atomic_check,
};
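/*
 * The conversion pattern above recurs throughout this series: the legacy
 * .dpms/.prepare/.commit trio collapses into .disable/.enable, and
 * .mode_fixup becomes .atomic_check, which trades a bool for an errno and
 * receives the CRTC and connector states directly instead of walking
 * dev->mode_config.connector_list to find the attached connector.
 */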
static const struct drm_encoder_funcs encoder_funcs = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 0c38cdcda4ca..ed36433fbe84 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -14,6 +14,8 @@
#ifndef __RCAR_DU_GROUP_H__
#define __RCAR_DU_GROUP_H__
+#include <linux/mutex.h>
+
#include "rcar_du_plane.h"
struct rcar_du_device;
@@ -25,6 +27,7 @@ struct rcar_du_device;
* @index: group index
* @use_count: number of users of the group (rcar_du_group_(get|put))
* @used_crtcs: number of CRTCs currently in use
+ * @lock: protects the DPTSR register
* @planes: planes handled by the group
*/
struct rcar_du_group {
@@ -35,6 +38,8 @@ struct rcar_du_group {
unsigned int use_count;
unsigned int used_crtcs;
+ struct mutex lock;
+
struct rcar_du_planes planes;
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index ca94b029ac80..96f2eb43713c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -12,6 +12,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
@@ -74,10 +75,13 @@ rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = rcar_du_hdmi_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
@@ -108,7 +112,7 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
@@ -116,7 +120,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- connector->encoder = encoder;
rcon->encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 221f0a17fd6a..81da8419282b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -26,41 +26,50 @@
struct rcar_du_hdmienc {
struct rcar_du_encoder *renc;
struct device *dev;
- int dpms;
+ bool enabled;
};
#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
-static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
+static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ if (sfuncs->dpms)
+ sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
- if (hdmienc->dpms == mode)
- return;
+ if (hdmienc->renc->lvds)
+ rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
+ false);
- if (mode == DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
- rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+ hdmienc->enabled = false;
+}
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, mode);
+static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (hdmienc->renc->lvds)
+ rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
+ true);
- if (mode != DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
- rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+ if (sfuncs->dpms)
+ sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
- hdmienc->dpms = mode;
+ hdmienc->enabled = true;
}
-static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ const struct drm_display_mode *mode = &crtc_state->mode;
/* The internal LVDS encoder has a clock frequency operating range of
* 30MHz to 150MHz. Clamp the clock accordingly.
@@ -70,19 +79,9 @@ static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
30000, 150000);
if (sfuncs->mode_fixup == NULL)
- return true;
-
- return sfuncs->mode_fixup(encoder, mode, adjusted_mode);
-}
+ return 0;
-static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder)
-{
- rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder)
-{
- rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON);
+ return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
}
static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
@@ -99,18 +98,18 @@ static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_hdmienc_dpms,
- .mode_fixup = rcar_du_hdmienc_mode_fixup,
- .prepare = rcar_du_hdmienc_mode_prepare,
- .commit = rcar_du_hdmienc_mode_commit,
.mode_set = rcar_du_hdmienc_mode_set,
+ .disable = rcar_du_hdmienc_disable,
+ .enable = rcar_du_hdmienc_enable,
+ .atomic_check = rcar_du_hdmienc_atomic_check,
};
static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+ if (hdmienc->enabled)
+ rcar_du_hdmienc_disable(encoder);
drm_encoder_cleanup(encoder);
put_device(hdmienc->dev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 68dab2601bf5..93117f159a3b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -12,12 +12,15 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <linux/of_graph.h>
+#include <linux/wait.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
@@ -185,9 +188,309 @@ static void rcar_du_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(rcdu->fbdev);
}
+/* -----------------------------------------------------------------------------
+ * Atomic Check and Update
+ */
+
+/*
+ * Atomic hardware plane allocator
+ *
+ * The hardware plane allocator is solely based on the atomic plane states
+ * without keeping any external state to avoid races between .atomic_check()
+ * and .atomic_commit().
+ *
+ * The core idea is to avoid a free planes bitmask that would need to be shared
+ * between the check and commit handlers, and to instead derive that knowledge
+ * from the hardware plane(s) allocated to each KMS plane. The allocator loops
+ * over all plane states to compute the free planes bitmask, allocates hardware
+ * planes based on that bitmask, and stores the result back in the plane states.
+ *
+ * For this to work we need to access the current state of planes not touched by
+ * the atomic update. To ensure that it won't be modified, we need to lock all
+ * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
+ * updates from .atomic_check() up to completion (when swapping the states if
+ * the check step has succeeded) or rollback (when freeing the states if the
+ * check step has failed).
+ *
+ * Allocation is performed in the .atomic_check() handler and applied
+ * automatically when the core swaps the old and new states.
+ */
+
+static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
+ struct rcar_du_plane_state *state)
+{
+ const struct rcar_du_format_info *cur_format;
+
+ cur_format = to_rcar_du_plane_state(plane->plane.state)->format;
+
+ /* Lowering the number of planes doesn't strictly require reallocation
+ * as the extra hardware plane will be freed when committing, but
+ * skipping reallocation could lead to more fragmentation.
+ */
+ return !cur_format || cur_format->planes != state->format->planes;
+}
+
+static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
+{
+ unsigned int mask;
+
+ if (state->hwindex == -1)
+ return 0;
+
+ mask = 1 << state->hwindex;
+ if (state->format->planes == 2)
+ mask |= 1 << ((state->hwindex + 1) % 8);
+
+ return mask;
+}
+
+static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
+{
+ unsigned int i;
+
+ for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
+ if (!(free & (1 << i)))
+ continue;
+
+ if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
+ break;
+ }
+
+ return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
+}
+
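/*
 * A worked example for the two helpers above (values are illustrative):
 * with hardware planes 0 and 1 in use, free = 0xfc. A one-plane format is
 * assigned index 2; a two-plane format needs indices i and (i + 1) % 8 and
 * is also assigned index 2 (hardware planes 2 and 3). With free = 0x81
 * only indices 0 and 7 remain, so a two-plane format wraps around and uses
 * planes 7 and 0. When no fitting index exists, -EBUSY is returned.
 */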
+static int rcar_du_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct rcar_du_device *rcdu = dev->dev_private;
+ unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+ unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+ bool needs_realloc = false;
+ unsigned int groups = 0;
+ unsigned int i;
+ int ret;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret < 0)
+ return ret;
+
+ /* Check if hardware planes need to be reallocated. */
+ for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ struct rcar_du_plane_state *plane_state;
+ struct rcar_du_plane *plane;
+ unsigned int index;
+
+ if (!state->planes[i])
+ continue;
+
+ plane = to_rcar_plane(state->planes[i]);
+ plane_state = to_rcar_du_plane_state(state->plane_states[i]);
+
+ /* If the plane is being disabled we don't need to go through
+ * the full reallocation procedure. Just mark the hardware
+ * plane(s) as freed.
+ */
+ if (!plane_state->format) {
+ index = plane - plane->group->planes.planes;
+ group_freed_planes[plane->group->index] |= 1 << index;
+ plane_state->hwindex = -1;
+ continue;
+ }
+
+ /* If the plane needs to be reallocated mark it as such, and
+ * mark the hardware plane(s) as free.
+ */
+ if (rcar_du_plane_needs_realloc(plane, plane_state)) {
+ groups |= 1 << plane->group->index;
+ needs_realloc = true;
+
+ index = plane - plane->group->planes.planes;
+ group_freed_planes[plane->group->index] |= 1 << index;
+ plane_state->hwindex = -1;
+ }
+ }
+
+ if (!needs_realloc)
+ return 0;
+
+ /* Grab all plane states for the groups that need reallocation so they
+ * are locked against concurrent updates. This serializes the update
+ * operation, but there's not much we can do about that, as it follows
+ * from the hardware design.
+ *
+ * Compute the used planes mask for each group at the same time to avoid
+ * looping over the planes separately later.
+ */
+ while (groups) {
+ unsigned int index = ffs(groups) - 1;
+ struct rcar_du_group *group = &rcdu->groups[index];
+ unsigned int used_planes = 0;
+
+ for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
+ struct rcar_du_plane *plane = &group->planes.planes[i];
+ struct rcar_du_plane_state *plane_state;
+ struct drm_plane_state *s;
+
+ s = drm_atomic_get_plane_state(state, &plane->plane);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ /* If the plane has been freed in the loop above, its
+ * hardware planes must not be added to the used planes
+ * bitmask. However, the current state doesn't reflect
+ * the freeing yet, as only the new state has been
+ * modified so far. Use the local freed planes list to
+ * check for that condition instead.
+ */
+ if (group_freed_planes[index] & (1 << i))
+ continue;
+
+ plane_state = to_rcar_du_plane_state(plane->plane.state);
+ used_planes |= rcar_du_plane_hwmask(plane_state);
+ }
+
+ group_free_planes[index] = 0xff & ~used_planes;
+ groups &= ~(1 << index);
+ }
+
+ /* Reallocate hardware planes for each plane that needs it. */
+ for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ struct rcar_du_plane_state *plane_state;
+ struct rcar_du_plane *plane;
+ int idx;
+
+ if (!state->planes[i])
+ continue;
+
+ plane = to_rcar_plane(state->planes[i]);
+ plane_state = to_rcar_du_plane_state(state->plane_states[i]);
+
+ /* Skip planes that are being disabled or don't need to be
+ * reallocated.
+ */
+ if (!plane_state->format ||
+ !rcar_du_plane_needs_realloc(plane, plane_state))
+ continue;
+
+ idx = rcar_du_plane_hwalloc(plane_state->format->planes,
+ group_free_planes[plane->group->index]);
+ if (idx < 0) {
+ dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
+ __func__);
+ return idx;
+ }
+
+ plane_state->hwindex = idx;
+
+ group_free_planes[plane->group->index] &=
+ ~rcar_du_plane_hwmask(plane_state);
+ }
+
+ return 0;
+}
+
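/*
 * To summarize the check handler above, reallocation proceeds in three
 * passes: (1) mark planes that are being disabled or reallocated as freed
 * and record them per group, (2) lock every plane state in the affected
 * groups and derive each group's free-planes bitmask from the current,
 * not yet swapped, states, and (3) allocate hardware planes from that
 * bitmask and store the result in the new plane states.
 */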
+struct rcar_du_commit {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_atomic_state *state;
+ u32 crtcs;
+};
+
+static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
+{
+ struct drm_device *dev = commit->dev;
+ struct rcar_du_device *rcdu = dev->dev_private;
+ struct drm_atomic_state *old_state = commit->state;
+
+ /* Apply the atomic update. */
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_planes(dev, old_state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+
+ drm_atomic_state_free(old_state);
+
+ /* Complete the commit, wake up any waiter. */
+ spin_lock(&rcdu->commit.wait.lock);
+ rcdu->commit.pending &= ~commit->crtcs;
+ wake_up_all_locked(&rcdu->commit.wait);
+ spin_unlock(&rcdu->commit.wait.lock);
+
+ kfree(commit);
+}
+
+static void rcar_du_atomic_work(struct work_struct *work)
+{
+ struct rcar_du_commit *commit =
+ container_of(work, struct rcar_du_commit, work);
+
+ rcar_du_atomic_complete(commit);
+}
+
+static int rcar_du_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async)
+{
+ struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_commit *commit;
+ unsigned int i;
+ int ret;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /* Allocate the commit object. */
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (commit == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&commit->work, rcar_du_atomic_work);
+ commit->dev = dev;
+ commit->state = state;
+
+ /* Wait until all affected CRTCs have completed previous commits and
+ * mark them as pending.
+ */
+ for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+ if (state->crtcs[i])
+ commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+ }
+
+ spin_lock(&rcdu->commit.wait.lock);
+ ret = wait_event_interruptible_locked(rcdu->commit.wait,
+ !(rcdu->commit.pending & commit->crtcs));
+ if (ret == 0)
+ rcdu->commit.pending |= commit->crtcs;
+ spin_unlock(&rcdu->commit.wait.lock);
+
+ if (ret) {
+ kfree(commit);
+ return ret;
+ }
+
+ /* Swap the state, this is the point of no return. */
+ drm_atomic_helper_swap_state(dev, state);
+
+ if (async)
+ schedule_work(&commit->work);
+ else
+ rcar_du_atomic_complete(commit);
+
+ return 0;
+}
+
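/*
 * Note that the commit path above serializes per CRTC rather than
 * globally: each commit computes a bitmask of affected CRTCs, sleeps
 * until none of them has a commit in flight, marks them as pending, and
 * clears the bits again in rcar_du_atomic_complete(). Two commits that
 * touch disjoint CRTCs can therefore run concurrently, while back-to-back
 * updates of the same CRTC are ordered correctly.
 */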
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
.output_poll_changed = rcar_du_output_poll_changed,
+ .atomic_check = rcar_du_atomic_check,
+ .atomic_commit = rcar_du_atomic_commit,
};
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
@@ -224,12 +527,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
- while (1) {
- ep_node = of_graph_get_next_endpoint(entity, ep_node);
-
- if (!ep_node)
- break;
-
+ for_each_endpoint_of_node(entity, ep_node) {
if (ep_node == entity_ep_node)
continue;
@@ -296,24 +594,19 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
struct device_node *np = rcdu->dev->of_node;
- struct device_node *ep_node = NULL;
+ struct device_node *ep_node;
unsigned int num_encoders = 0;
/*
* Iterate over the endpoints and create one encoder for each output
* pipeline.
*/
- while (1) {
+ for_each_endpoint_of_node(np, ep_node) {
enum rcar_du_output output;
struct of_endpoint ep;
unsigned int i;
int ret;
- ep_node = of_graph_get_next_endpoint(np, ep_node);
-
- if (ep_node == NULL)
- break;
-
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret < 0) {
of_node_put(ep_node);
@@ -385,6 +678,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
for (i = 0; i < num_groups; ++i) {
struct rcar_du_group *rgrp = &rcdu->groups[i];
+ mutex_init(&rgrp->lock);
+
rgrp->dev = rcdu;
rgrp->mmio_offset = mmio_offsets[i];
rgrp->index = i;
@@ -432,27 +727,21 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
encoder->possible_clones = (1 << num_encoders) - 1;
}
- /* Now that the CRTCs have been initialized register the planes. */
- for (i = 0; i < num_groups; ++i) {
- ret = rcar_du_planes_register(&rcdu->groups[i]);
- if (ret < 0)
- return ret;
- }
+ drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- drm_helper_disable_unused_functions(dev);
+ if (dev->mode_config.num_connector) {
+ fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
- fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
-#ifndef CONFIG_FRAMEBUFFER_CONSOLE
- drm_fbdev_cma_restore_mode(fbdev);
-#endif
-
- rcdu->fbdev = fbdev;
+ rcdu->fbdev = fbdev;
+ } else {
+ dev_info(rcdu->dev,
+ "no connector found, disabling fbdev emulation\n");
+ }
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 6d9811c052c4..0c43032fc693 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -12,6 +12,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -74,10 +75,13 @@ rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = rcar_du_lvds_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
@@ -117,7 +121,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
@@ -125,7 +129,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- connector->encoder = encoder;
lvdscon->connector.encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index 7cfb48ce1791..85043c5bad03 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -28,7 +28,7 @@ struct rcar_du_lvdsenc {
unsigned int index;
void __iomem *mmio;
struct clk *clock;
- int dpms;
+ bool enabled;
enum rcar_lvds_input input;
};
@@ -48,7 +48,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
u32 pllcr;
int ret;
- if (lvds->dpms == DRM_MODE_DPMS_ON)
+ if (lvds->enabled)
return 0;
ret = clk_prepare_enable(lvds->clock);
@@ -110,13 +110,13 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
lvdcr0 |= LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
- lvds->dpms = DRM_MODE_DPMS_ON;
+ lvds->enabled = true;
return 0;
}
static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
{
- if (lvds->dpms == DRM_MODE_DPMS_OFF)
+ if (!lvds->enabled)
return;
rcar_lvds_write(lvds, LVDCR0, 0);
@@ -124,13 +124,13 @@ static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
clk_disable_unprepare(lvds->clock);
- lvds->dpms = DRM_MODE_DPMS_OFF;
+ lvds->enabled = false;
}
-int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, int mode)
+int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc,
+ bool enable)
{
- if (mode == DRM_MODE_DPMS_OFF) {
+ if (!enable) {
rcar_du_lvdsenc_stop(lvds);
return 0;
} else if (crtc) {
@@ -179,7 +179,7 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
lvds->dev = rcdu;
lvds->index = i;
lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
- lvds->dpms = DRM_MODE_DPMS_OFF;
+ lvds->enabled = false;
ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
if (ret < 0)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index f65aabda0796..9a6001c07303 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -28,15 +28,15 @@ enum rcar_lvds_input {
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
-int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, int mode);
+int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, bool enable);
#else
static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
{
return 0;
}
-static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, int mode)
+static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, bool enable)
{
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 50f2f2b20d39..210e5c3fd982 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -12,10 +12,12 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
@@ -26,16 +28,6 @@
#define RCAR_DU_COLORKEY_SOURCE (1 << 24)
#define RCAR_DU_COLORKEY_MASK (1 << 24)
-struct rcar_du_kms_plane {
- struct drm_plane plane;
- struct rcar_du_plane *hwplane;
-};
-
-static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
-{
- return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
-}
-
static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
unsigned int index, u32 reg)
{
@@ -50,74 +42,31 @@ static void rcar_du_plane_write(struct rcar_du_group *rgrp,
data);
}
-int rcar_du_plane_reserve(struct rcar_du_plane *plane,
- const struct rcar_du_format_info *format)
-{
- struct rcar_du_group *rgrp = plane->group;
- unsigned int i;
- int ret = -EBUSY;
-
- mutex_lock(&rgrp->planes.lock);
-
- for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
- if (!(rgrp->planes.free & (1 << i)))
- continue;
-
- if (format->planes == 1 ||
- rgrp->planes.free & (1 << ((i + 1) % 8)))
- break;
- }
-
- if (i == ARRAY_SIZE(rgrp->planes.planes))
- goto done;
-
- rgrp->planes.free &= ~(1 << i);
- if (format->planes == 2)
- rgrp->planes.free &= ~(1 << ((i + 1) % 8));
-
- plane->hwindex = i;
-
- ret = 0;
-
-done:
- mutex_unlock(&rgrp->planes.lock);
- return ret;
-}
-
-void rcar_du_plane_release(struct rcar_du_plane *plane)
-{
- struct rcar_du_group *rgrp = plane->group;
-
- if (plane->hwindex == -1)
- return;
-
- mutex_lock(&rgrp->planes.lock);
- rgrp->planes.free |= 1 << plane->hwindex;
- if (plane->format->planes == 2)
- rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
- mutex_unlock(&rgrp->planes.lock);
-
- plane->hwindex = -1;
-}
-
-void rcar_du_plane_update_base(struct rcar_du_plane *plane)
+static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane)
{
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
+ struct drm_framebuffer *fb = plane->plane.state->fb;
struct rcar_du_group *rgrp = plane->group;
- unsigned int index = plane->hwindex;
+ unsigned int src_x = state->state.src_x >> 16;
+ unsigned int src_y = state->state.src_y >> 16;
+ unsigned int index = state->hwindex;
+ struct drm_gem_cma_object *gem;
bool interlaced;
u32 mwr;
- interlaced = plane->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+ interlaced = state->state.crtc->state->adjusted_mode.flags
+ & DRM_MODE_FLAG_INTERLACE;
/* Memory pitch (expressed in pixels). Must be doubled for interlaced
* operation with 32bpp formats.
*/
- if (plane->format->planes == 2)
- mwr = plane->pitch;
+ if (state->format->planes == 2)
+ mwr = fb->pitches[0];
else
- mwr = plane->pitch * 8 / plane->format->bpp;
+ mwr = fb->pitches[0] * 8 / state->format->bpp;
- if (interlaced && plane->format->bpp == 32)
+ if (interlaced && state->format->bpp == 32)
mwr *= 2;
rcar_du_plane_write(rgrp, index, PnMWR, mwr);
@@ -134,42 +83,33 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
* require a halved Y position value, in both progressive and interlaced
* modes.
*/
- rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
- (!interlaced && plane->format->bpp == 32 ? 2 : 1));
- rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
+ rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
+ (!interlaced && state->format->bpp == 32 ? 2 : 1));
- if (plane->format->planes == 2) {
- index = (index + 1) % 8;
-
- rcar_du_plane_write(rgrp, index, PnMWR, plane->pitch);
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, gem->paddr + fb->offsets[0]);
- rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
- (plane->format->bpp == 16 ? 2 : 1) / 2);
- rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
- }
-}
+ if (state->format->planes == 2) {
+ index = (index + 1) % 8;
-void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
- struct drm_framebuffer *fb)
-{
- struct drm_gem_cma_object *gem;
+ rcar_du_plane_write(rgrp, index, PnMWR, fb->pitches[0]);
- plane->pitch = fb->pitches[0];
+ rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
+ (state->format->bpp == 16 ? 2 : 1) / 2);
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- plane->dma[0] = gem->paddr + fb->offsets[0];
-
- if (plane->format->planes == 2) {
gem = drm_fb_cma_get_gem_obj(fb, 1);
- plane->dma[1] = gem->paddr + fb->offsets[1];
+ rcar_du_plane_write(rgrp, index, PnDSA0R,
+ gem->paddr + fb->offsets[1]);
}
}
static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
unsigned int index)
{
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
struct rcar_du_group *rgrp = plane->group;
u32 colorkey;
u32 pnmr;
@@ -183,47 +123,47 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
* For XRGB, set the alpha value to the plane-wide alpha value and
* enable alpha-blending regardless of the X bit value.
*/
- if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
+ if (state->format->fourcc != DRM_FORMAT_XRGB1555)
rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
else
rcar_du_plane_write(rgrp, index, PnALPHAR,
- PnALPHAR_ABIT_X | plane->alpha);
+ PnALPHAR_ABIT_X | state->alpha);
- pnmr = PnMR_BM_MD | plane->format->pnmr;
+ pnmr = PnMR_BM_MD | state->format->pnmr;
/* Disable color keying when requested. YUV formats have the
* PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
* automatically.
*/
- if ((plane->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
+ if ((state->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
pnmr |= PnMR_SPIM_TP_OFF;
/* For packed YUV formats we need to select the U/V order. */
- if (plane->format->fourcc == DRM_FORMAT_YUYV)
+ if (state->format->fourcc == DRM_FORMAT_YUYV)
pnmr |= PnMR_YCDF_YUYV;
rcar_du_plane_write(rgrp, index, PnMR, pnmr);
- switch (plane->format->fourcc) {
+ switch (state->format->fourcc) {
case DRM_FORMAT_RGB565:
- colorkey = ((plane->colorkey & 0xf80000) >> 8)
- | ((plane->colorkey & 0x00fc00) >> 5)
- | ((plane->colorkey & 0x0000f8) >> 3);
+ colorkey = ((state->colorkey & 0xf80000) >> 8)
+ | ((state->colorkey & 0x00fc00) >> 5)
+ | ((state->colorkey & 0x0000f8) >> 3);
rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- colorkey = ((plane->colorkey & 0xf80000) >> 9)
- | ((plane->colorkey & 0x00f800) >> 6)
- | ((plane->colorkey & 0x0000f8) >> 3);
+ colorkey = ((state->colorkey & 0xf80000) >> 9)
+ | ((state->colorkey & 0x00f800) >> 6)
+ | ((state->colorkey & 0x0000f8) >> 3);
rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
rcar_du_plane_write(rgrp, index, PnTC3R,
- PnTC3R_CODE | (plane->colorkey & 0xffffff));
+ PnTC3R_CODE | (state->colorkey & 0xffffff));
break;
}
}
@@ -231,6 +171,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
unsigned int index)
{
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
struct rcar_du_group *rgrp = plane->group;
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
@@ -242,17 +184,17 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
*/
ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
ddcr4 &= ~PnDDCR4_EDF_MASK;
- ddcr4 |= plane->format->edf | PnDDCR4_CODE;
+ ddcr4 |= state->format->edf | PnDDCR4_CODE;
rcar_du_plane_setup_mode(plane, index);
- if (plane->format->planes == 2) {
- if (plane->hwindex != index) {
- if (plane->format->fourcc == DRM_FORMAT_NV12 ||
- plane->format->fourcc == DRM_FORMAT_NV21)
+ if (state->format->planes == 2) {
+ if (state->hwindex != index) {
+ if (state->format->fourcc == DRM_FORMAT_NV12 ||
+ state->format->fourcc == DRM_FORMAT_NV21)
ddcr2 |= PnDDCR2_Y420;
- if (plane->format->fourcc == DRM_FORMAT_NV21)
+ if (state->format->fourcc == DRM_FORMAT_NV21)
ddcr2 |= PnDDCR2_NV21;
ddcr2 |= PnDDCR2_DIVU;
@@ -265,10 +207,10 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
/* Destination position and size */
- rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
- rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
- rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
- rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
+ rcar_du_plane_write(rgrp, index, PnDSXR, plane->plane.state->crtc_w);
+ rcar_du_plane_write(rgrp, index, PnDSYR, plane->plane.state->crtc_h);
+ rcar_du_plane_write(rgrp, index, PnDPXR, plane->plane.state->crtc_x);
+ rcar_du_plane_write(rgrp, index, PnDPYR, plane->plane.state->crtc_y);
/* Wrap-around and blinking, disabled */
rcar_du_plane_write(rgrp, index, PnWASPR, 0);
@@ -279,150 +221,143 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
void rcar_du_plane_setup(struct rcar_du_plane *plane)
{
- __rcar_du_plane_setup(plane, plane->hwindex);
- if (plane->format->planes == 2)
- __rcar_du_plane_setup(plane, (plane->hwindex + 1) % 8);
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
+
+ __rcar_du_plane_setup(plane, state->hwindex);
+ if (state->format->planes == 2)
+ __rcar_du_plane_setup(plane, (state->hwindex + 1) % 8);
- rcar_du_plane_update_base(plane);
+ rcar_du_plane_setup_fb(plane);
}
-static int
-rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static int rcar_du_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
+ struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state);
struct rcar_du_plane *rplane = to_rcar_plane(plane);
struct rcar_du_device *rcdu = rplane->group->dev;
- const struct rcar_du_format_info *format;
- unsigned int nplanes;
- int ret;
- format = rcar_du_format_info(fb->pixel_format);
- if (format == NULL) {
- dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
- fb->pixel_format);
- return -EINVAL;
+ if (!state->fb || !state->crtc) {
+ rstate->format = NULL;
+ return 0;
}
- if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+ if (state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h) {
dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
return -EINVAL;
}
- nplanes = rplane->format ? rplane->format->planes : 0;
-
- /* Reallocate hardware planes if the number of required planes has
- * changed.
- */
- if (format->planes != nplanes) {
- rcar_du_plane_release(rplane);
- ret = rcar_du_plane_reserve(rplane, format);
- if (ret < 0)
- return ret;
+ rstate->format = rcar_du_format_info(state->fb->pixel_format);
+ if (rstate->format == NULL) {
+ dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
+ state->fb->pixel_format);
+ return -EINVAL;
}
- rplane->crtc = crtc;
- rplane->format = format;
-
- rplane->src_x = src_x >> 16;
- rplane->src_y = src_y >> 16;
- rplane->dst_x = crtc_x;
- rplane->dst_y = crtc_y;
- rplane->width = crtc_w;
- rplane->height = crtc_h;
-
- rcar_du_plane_compute_base(rplane, fb);
- rcar_du_plane_setup(rplane);
-
- mutex_lock(&rplane->group->planes.lock);
- rplane->enabled = true;
- rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rplane->group->planes.lock);
-
return 0;
}
-static int rcar_du_plane_disable(struct drm_plane *plane)
+static void rcar_du_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct rcar_du_plane *rplane = to_rcar_plane(plane);
- if (!rplane->enabled)
- return 0;
+ if (plane->state->crtc)
+ rcar_du_plane_setup(rplane);
+}
- mutex_lock(&rplane->group->planes.lock);
- rplane->enabled = false;
- rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rplane->group->planes.lock);
+static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
+ .atomic_check = rcar_du_plane_atomic_check,
+ .atomic_update = rcar_du_plane_atomic_update,
+};
- rcar_du_plane_release(rplane);
+static void rcar_du_plane_reset(struct drm_plane *plane)
+{
+ struct rcar_du_plane_state *state;
- rplane->crtc = NULL;
- rplane->format = NULL;
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
- return 0;
-}
+ kfree(plane->state);
+ plane->state = NULL;
-/* Both the .set_property and the .update_plane operations are called with the
- * mode_config lock held. There is thus no need to explicitly protect access to
- * the alpha and colorkey fields and the mode register.
- */
-static void rcar_du_plane_set_alpha(struct rcar_du_plane *plane, u32 alpha)
-{
- if (plane->alpha == alpha)
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
return;
- plane->alpha = alpha;
- if (!plane->enabled || plane->format->fourcc != DRM_FORMAT_XRGB1555)
- return;
+ state->hwindex = -1;
+ state->alpha = 255;
+ state->colorkey = RCAR_DU_COLORKEY_NONE;
+ state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
- rcar_du_plane_setup_mode(plane, plane->hwindex);
+ plane->state = &state->state;
+ plane->state->plane = plane;
}
-static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
- u32 colorkey)
+static struct drm_plane_state *
+rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
{
- if (plane->colorkey == colorkey)
- return;
+ struct rcar_du_plane_state *state;
+ struct rcar_du_plane_state *copy;
- plane->colorkey = colorkey;
- if (!plane->enabled)
- return;
+ state = to_rcar_du_plane_state(plane->state);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (copy == NULL)
+ return NULL;
+
+ if (copy->state.fb)
+ drm_framebuffer_reference(copy->state.fb);
- rcar_du_plane_setup_mode(plane, plane->hwindex);
+ return &copy->state;
}
-static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
- unsigned int zpos)
+static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- mutex_lock(&plane->group->planes.lock);
- if (plane->zpos == zpos)
- goto done;
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
- plane->zpos = zpos;
- if (!plane->enabled)
- goto done;
+ kfree(to_rcar_du_plane_state(state));
+}
+
+static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state);
+ struct rcar_du_plane *rplane = to_rcar_plane(plane);
+ struct rcar_du_group *rgrp = rplane->group;
- rcar_du_crtc_update_planes(plane->crtc);
+ if (property == rgrp->planes.alpha)
+ rstate->alpha = val;
+ else if (property == rgrp->planes.colorkey)
+ rstate->colorkey = val;
+ else if (property == rgrp->planes.zpos)
+ rstate->zpos = val;
+ else
+ return -EINVAL;
-done:
- mutex_unlock(&plane->group->planes.lock);
+ return 0;
}
-static int rcar_du_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t value)
+static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state, struct drm_property *property,
+ uint64_t *val)
{
+ const struct rcar_du_plane_state *rstate =
+ container_of(state, const struct rcar_du_plane_state, state);
struct rcar_du_plane *rplane = to_rcar_plane(plane);
struct rcar_du_group *rgrp = rplane->group;
if (property == rgrp->planes.alpha)
- rcar_du_plane_set_alpha(rplane, value);
+ *val = rstate->alpha;
else if (property == rgrp->planes.colorkey)
- rcar_du_plane_set_colorkey(rplane, value);
+ *val = rstate->colorkey;
else if (property == rgrp->planes.zpos)
- rcar_du_plane_set_zpos(rplane, value);
+ *val = rstate->zpos;
else
return -EINVAL;
@@ -430,10 +365,15 @@ static int rcar_du_plane_set_property(struct drm_plane *plane,
}
static const struct drm_plane_funcs rcar_du_plane_funcs = {
- .update_plane = rcar_du_plane_update,
- .disable_plane = rcar_du_plane_disable,
- .set_property = rcar_du_plane_set_property,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = rcar_du_plane_reset,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
+ .atomic_duplicate_state = rcar_du_plane_atomic_duplicate_state,
+ .atomic_destroy_state = rcar_du_plane_atomic_destroy_state,
+ .atomic_set_property = rcar_du_plane_atomic_set_property,
+ .atomic_get_property = rcar_du_plane_atomic_get_property,
};
static const uint32_t formats[] = {
@@ -453,10 +393,11 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
{
struct rcar_du_planes *planes = &rgrp->planes;
struct rcar_du_device *rcdu = rgrp->dev;
+ unsigned int num_planes;
+ unsigned int num_crtcs;
+ unsigned int crtcs;
unsigned int i;
-
- mutex_init(&planes->lock);
- planes->free = 0xff;
+ int ret;
planes->alpha =
drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
@@ -478,45 +419,34 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
if (planes->zpos == NULL)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
- struct rcar_du_plane *plane = &planes->planes[i];
-
- plane->group = rgrp;
- plane->hwindex = -1;
- plane->alpha = 255;
- plane->colorkey = RCAR_DU_COLORKEY_NONE;
- plane->zpos = 0;
- }
-
- return 0;
-}
-
-int rcar_du_planes_register(struct rcar_du_group *rgrp)
-{
- struct rcar_du_planes *planes = &rgrp->planes;
- struct rcar_du_device *rcdu = rgrp->dev;
- unsigned int crtcs;
- unsigned int i;
- int ret;
+ /* Create one primary plane per CRTC in this group and seven overlay
+ * planes.
+ */
+ num_crtcs = min(rcdu->num_crtcs - 2 * rgrp->index, 2U);
+ num_planes = num_crtcs + 7;
crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
- for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
- struct rcar_du_kms_plane *plane;
-
- plane = devm_kzalloc(rcdu->dev, sizeof(*plane), GFP_KERNEL);
- if (plane == NULL)
- return -ENOMEM;
+ for (i = 0; i < num_planes; ++i) {
+ enum drm_plane_type type = i < num_crtcs
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
+ struct rcar_du_plane *plane = &planes->planes[i];
- plane->hwplane = &planes->planes[i + 2];
- plane->hwplane->zpos = 1;
+ plane->group = rgrp;
- ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_plane_funcs, formats,
- ARRAY_SIZE(formats), false);
+ ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
+ &rcar_du_plane_funcs, formats,
+ ARRAY_SIZE(formats), type);
if (ret < 0)
return ret;
+ drm_plane_helper_add(&plane->plane,
+ &rcar_du_plane_helper_funcs);
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
drm_object_attach_property(&plane->plane.base,
planes->alpha, 255);
drm_object_attach_property(&plane->plane.base,
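
A note on the scaling check in rcar_du_plane_atomic_check() above: DRM plane source rectangles (src_*) are expressed in 16.16 fixed point while destination rectangles (crtc_*) are whole pixels, so an unscaled update satisfies src_w >> 16 == crtc_w. A minimal sketch of that convention, using a hypothetical helper name that is not part of this patch:

#include <drm/drm_crtc.h>

/* Sketch only: src_{w,h} are 16.16 fixed point, crtc_{w,h} are pixels. */
static bool sketch_plane_is_unscaled(const struct drm_plane_state *state)
{
        return (state->src_w >> 16) == state->crtc_w &&
               (state->src_h >> 16) == state->crtc_h;
}
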
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 3021288b1a89..abff0ebeb195 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,68 +14,57 @@
#ifndef __RCAR_DU_PLANE_H__
#define __RCAR_DU_PLANE_H__
-#include <linux/mutex.h>
-
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
struct rcar_du_format_info;
struct rcar_du_group;
-/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
- * using KMS planes requires at least one of the CRTCs being enabled, no more
- * than 7 KMS planes can be available. We thus create 7 KMS planes and
- * 9 software planes (one for each KMS planes and one for each CRTC).
+/* The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+ * As using overlay planes requires at least one CRTC to be enabled, no
+ * more than 7 overlay planes can be available. We thus create 1 primary plane
+ * per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
*/
-
-#define RCAR_DU_NUM_KMS_PLANES 7
+#define RCAR_DU_NUM_KMS_PLANES 9
#define RCAR_DU_NUM_HW_PLANES 8
-#define RCAR_DU_NUM_SW_PLANES 9
struct rcar_du_plane {
+ struct drm_plane plane;
struct rcar_du_group *group;
- struct drm_crtc *crtc;
-
- bool enabled;
-
- int hwindex; /* 0-based, -1 means unused */
- unsigned int alpha;
- unsigned int colorkey;
- unsigned int zpos;
-
- const struct rcar_du_format_info *format;
-
- unsigned long dma[2];
- unsigned int pitch;
-
- unsigned int width;
- unsigned int height;
-
- unsigned int src_x;
- unsigned int src_y;
- unsigned int dst_x;
- unsigned int dst_y;
};
+static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct rcar_du_plane, plane);
+}
+
struct rcar_du_planes {
- struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
- unsigned int free;
- struct mutex lock;
+ struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES];
struct drm_property *alpha;
struct drm_property *colorkey;
struct drm_property *zpos;
};
+struct rcar_du_plane_state {
+ struct drm_plane_state state;
+
+ const struct rcar_du_format_info *format;
+ int hwindex; /* 0-based, -1 means unused */
+
+ unsigned int alpha;
+ unsigned int colorkey;
+ unsigned int zpos;
+};
+
+static inline struct rcar_du_plane_state *
+to_rcar_du_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct rcar_du_plane_state, state);
+}
+
int rcar_du_planes_init(struct rcar_du_group *rgrp);
-int rcar_du_planes_register(struct rcar_du_group *rgrp);
void rcar_du_plane_setup(struct rcar_du_plane *plane);
-void rcar_du_plane_update_base(struct rcar_du_plane *plane);
-void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
- struct drm_framebuffer *fb);
-int rcar_du_plane_reserve(struct rcar_du_plane *plane,
- const struct rcar_du_format_info *format);
-void rcar_du_plane_release(struct rcar_du_plane *plane);
#endif /* __RCAR_DU_PLANE_H__ */
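
The new rcar_du_plane_state above follows the standard pattern for driver-subclassed atomic state: embed struct drm_plane_state as a member and downcast with container_of(). A minimal sketch of the same pattern for a hypothetical driver (names are illustrative only):

#include <linux/kernel.h>
#include <drm/drm_crtc.h>

struct sketch_plane_state {
        struct drm_plane_state state;   /* base state, must be embedded */
        unsigned int alpha;             /* driver-private property */
};

static inline struct sketch_plane_state *
to_sketch_plane_state(struct drm_plane_state *state)
{
        /* downcast: recover the wrapper from the embedded base state */
        return container_of(state, struct sketch_plane_state, state);
}
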
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d4879921cc7..e0a5d8f93963 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -12,6 +12,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -43,10 +44,13 @@ rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_vga_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = rcar_du_vga_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
@@ -76,7 +80,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
@@ -84,7 +88,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- connector->encoder = encoder;
rcon->encoder = renc;
return 0;
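
The vgacon connector conversion above (repeated verbatim for the sti dvo/hda/hdmi connectors later in this section) is pure boilerplate: .dpms moves to the atomic helper and the three stock state hooks are added. A generic sketch of the resulting funcs table, with hypothetical driver callbacks standing in for the driver-specific parts:

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>

static enum drm_connector_status
sketch_detect(struct drm_connector *connector, bool force)
{
        return connector_status_connected;      /* driver-specific probe */
}

static void sketch_destroy(struct drm_connector *connector)
{
        drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs sketch_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = sketch_detect,
        .destroy = sketch_destroy,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
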
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index d236faa05b19..80d6fc8a5cee 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -133,12 +133,12 @@ static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
}
};
-static const struct dw_hdmi_sym_term rockchip_sym_term[] = {
- /*pixelclk symbol term*/
- { 74250000, 0x8009, 0x0004 },
- { 148500000, 0x8029, 0x0004 },
- { 297000000, 0x8039, 0x0005 },
- { ~0UL, 0x0000, 0x0000 }
+static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
+ /*pixelclk symbol term vlev*/
+ { 74250000, 0x8009, 0x0004, 0x0272},
+ { 148500000, 0x802b, 0x0004, 0x028d},
+ { 297000000, 0x8039, 0x0005, 0x028d},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
};
static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
@@ -230,7 +230,7 @@ static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = {
.mode_valid = dw_hdmi_rockchip_mode_valid,
.mpll_cfg = rockchip_mpll_cfg,
.cur_ctr = rockchip_cur_ctr,
- .sym_term = rockchip_sym_term,
+ .phy_config = rockchip_phy_config,
.dev_type = RK3288_HDMI,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 21a481b224eb..3962176ee713 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -129,6 +129,7 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
struct rockchip_drm_private *private;
struct dma_iommu_mapping *mapping;
struct device *dev = drm_dev->dev;
+ struct drm_connector *connector;
int ret;
private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
@@ -171,6 +172,23 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_detach_device;
+ /*
+ * All components are now added, so we can publish the connector sysfs
+ * entries to userspace. This will generate hotplug events and so
+ * userspace will expect to be able to access DRM at this point.
+ */
+ list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
+ head) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ dev_err(drm_dev->dev,
+ "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
+ connector->base.id,
+ connector->name, ret);
+ goto err_unbind;
+ }
+ }
+
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -200,6 +218,7 @@ err_vblank_cleanup:
drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
+err_unbind:
component_unbind_all(dev, drm_dev);
err_detach_device:
arm_iommu_detach_device(dev);
@@ -366,7 +385,7 @@ static const struct dev_pm_ops rockchip_drm_pm_ops = {
int rockchip_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder)
{
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct drm_crtc *crtc = encoder->crtc;
struct of_endpoint endpoint;
struct device_node *port;
@@ -375,18 +394,15 @@ int rockchip_drm_encoder_get_mux_id(struct device_node *node,
if (!node || !crtc)
return -EINVAL;
- do {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
port = of_graph_get_remote_port(ep);
of_node_put(port);
if (port == crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
+ of_node_put(ep);
return ret ?: endpoint.id;
}
- } while (ep);
+ }
return -EINVAL;
}
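
The rewritten loop above adds an of_node_put(ep) before the early return: for_each_endpoint_of_node() holds a reference on the endpoint for the current iteration and drops it when advancing, so breaking out of the loop leaks the node unless the caller releases it. The patch puts the reference because it returns an integer; a function that returns the node would instead hand the reference to its caller, as in this sketch with hypothetical names:

#include <linux/of.h>
#include <linux/of_graph.h>

/* Sketch: return the first endpoint whose remote port matches @wanted. */
static struct device_node *
sketch_find_endpoint(struct device_node *node, struct device_node *wanted)
{
        struct device_node *ep;

        for_each_endpoint_of_node(node, ep) {
                struct device_node *port = of_graph_get_remote_port(ep);

                of_node_put(port);
                if (port == wanted)
                        return ep;      /* caller now owns the reference */
        }

        return NULL;    /* iterator dropped all references itself */
}
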
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index a5d889a8716b..5b0dc0f6fd94 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -71,7 +71,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
- rk_obj = rockchip_gem_create_object(dev, size);
+ rk_obj = rockchip_gem_create_object(dev, size, true);
if (IS_ERR(rk_obj))
return -ENOMEM;
@@ -106,7 +106,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fb = helper->fb;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
@@ -119,6 +119,9 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
fb->width, fb->height, fb->depth, rk_obj->kvaddr,
offset, size);
+
+ fbi->skip_vt_switch = true;
+
return 0;
err_drm_framebuffer_unref:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7ca8799ef784..eb2282cc4a56 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -22,7 +22,8 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
-static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
+ bool alloc_kmap)
{
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;
@@ -30,7 +31,9 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
init_dma_attrs(&rk_obj->dma_attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
- /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
+ if (!alloc_kmap)
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);
+
rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
&rk_obj->dma_addr, GFP_KERNEL,
&rk_obj->dma_attrs);
@@ -103,7 +106,8 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}
struct rockchip_gem_object *
- rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
+ bool alloc_kmap)
{
struct rockchip_gem_object *rk_obj;
struct drm_gem_object *obj;
@@ -119,7 +123,7 @@ struct rockchip_gem_object *
drm_gem_private_object_init(drm, obj, size);
- ret = rockchip_gem_alloc_buf(rk_obj);
+ ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
if (ret)
goto err_free_rk_obj;
@@ -163,7 +167,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv,
struct drm_gem_object *obj;
int ret;
- rk_obj = rockchip_gem_create_object(drm, size);
+ rk_obj = rockchip_gem_create_object(drm, size, false);
if (IS_ERR(rk_obj))
return ERR_CAST(rk_obj);
@@ -282,6 +286,9 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
+ return NULL;
+
return rk_obj->kvaddr;
}
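
The gem changes above thread an alloc_kmap flag down to dma_alloc_attrs() so that only fbdev buffers get a kernel virtual mapping; everything else passes DMA_ATTR_NO_KERNEL_MAPPING and must report NULL from vmap, as the prime_vmap hunk shows. A minimal sketch of the allocation side, assuming the dma_attrs API of this kernel era and a hypothetical wrapper name:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void *sketch_alloc_buf(struct device *dev, size_t size,
                              dma_addr_t *dma_addr, bool alloc_kmap,
                              struct dma_attrs *attrs)
{
        init_dma_attrs(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);
        if (!alloc_kmap)
                dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

        /* the returned kvaddr is only meaningful when a mapping exists */
        return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
}
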
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 67bcebe90003..ad22618473a4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -41,7 +41,8 @@ int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma);
struct rockchip_gem_object *
- rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
+ bool alloc_kmap);
void rockchip_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 9a5c571b95fc..ccb0ce073ef2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -81,7 +81,7 @@ struct vop {
struct drm_crtc crtc;
struct device *dev;
struct drm_device *drm_dev;
- unsigned int dpms;
+ bool is_enabled;
int connector_type;
int connector_out_mode;
@@ -89,6 +89,7 @@ struct vop {
/* mutex vsync_ work */
struct mutex vsync_mutex;
bool vsync_work_pending;
+ struct completion dsp_hold_completion;
const struct vop_data *data;
@@ -382,11 +383,50 @@ static bool is_alpha_support(uint32_t format)
}
}
+static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!vop->is_enabled))
+ return;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
+ DSP_HOLD_VALID_INTR_EN(1));
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!vop->is_enabled))
+ return;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
+ DSP_HOLD_VALID_INTR_EN(0));
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
static void vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
int ret;
+ if (vop->is_enabled)
+ return;
+
+ ret = pm_runtime_get_sync(vop->dev);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ return;
+ }
+
ret = clk_enable(vop->hclk);
if (ret < 0) {
dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
@@ -417,6 +457,11 @@ static void vop_enable(struct drm_crtc *crtc)
goto err_disable_aclk;
}
+ /*
+ * At this point the vop clock and iommu are enabled, so vop register R/W is safe.
+ */
+ vop->is_enabled = true;
+
spin_lock(&vop->reg_lock);
VOP_CTRL_SET(vop, standby, 0);
@@ -441,28 +486,44 @@ static void vop_disable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
- drm_vblank_off(crtc->dev, vop->pipe);
+ if (!vop->is_enabled)
+ return;
- disable_irq(vop->irq);
+ drm_vblank_off(crtc->dev, vop->pipe);
/*
- * TODO: Since standby doesn't take effect until the next vblank,
- * when we turn off dclk below, the vop is probably still active.
+ * Vop standby takes effect at the end of the current frame; the
+ * "dsp hold valid" irq signals that standby has completed.
+ *
+ * We must wait for standby to complete before disabling aclk,
+ * otherwise the memory bus may lock up.
*/
+ reinit_completion(&vop->dsp_hold_completion);
+ vop_dsp_hold_valid_irq_enable(vop);
+
spin_lock(&vop->reg_lock);
VOP_CTRL_SET(vop, standby, 1);
spin_unlock(&vop->reg_lock);
+
+ wait_for_completion(&vop->dsp_hold_completion);
+
+ vop_dsp_hold_valid_irq_disable(vop);
+
+ disable_irq(vop->irq);
+
+ vop->is_enabled = false;
+
/*
- * disable dclk to stop frame scan, so we can safely detach iommu,
+ * Vop standby is complete, so it is safe to detach the iommu.
*/
- clk_disable(vop->dclk);
-
rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
+ clk_disable(vop->dclk);
clk_disable(vop->aclk);
clk_disable(vop->hclk);
+ pm_runtime_put(vop->dev);
}
/*
@@ -742,7 +803,7 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
unsigned long flags;
- if (vop->dpms != DRM_MODE_DPMS_ON)
+ if (!vop->is_enabled)
return -EPERM;
spin_lock_irqsave(&vop->irq_lock, flags);
@@ -759,8 +820,9 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
unsigned long flags;
- if (vop->dpms != DRM_MODE_DPMS_ON)
+ if (!vop->is_enabled)
return;
+
spin_lock_irqsave(&vop->irq_lock, flags);
vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -773,15 +835,8 @@ static const struct rockchip_crtc_funcs private_crtc_funcs = {
static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- struct vop *vop = to_vop(crtc);
-
DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
- if (vop->dpms == mode) {
- DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
- return;
- }
-
switch (mode) {
case DRM_MODE_DPMS_ON:
vop_enable(crtc);
@@ -795,8 +850,6 @@ static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
DRM_DEBUG_KMS("unspecified mode %d\n", mode);
break;
}
-
- vop->dpms = mode;
}
static void vop_crtc_prepare(struct drm_crtc *crtc)
@@ -847,7 +900,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
u16 vact_end = vact_st + vdisplay;
- int ret;
+ int ret, ret_clk;
uint32_t val;
/*
@@ -869,13 +922,14 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
default:
DRM_ERROR("unsupport connector_type[%d]\n",
vop->connector_type);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
};
VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
val = 0x8;
- val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
- val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
VOP_CTRL_SET(vop, pin_pol, val);
VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
@@ -892,7 +946,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
ret = vop_crtc_mode_set_base(crtc, x, y, fb);
if (ret)
- return ret;
+ goto out;
/*
* reset dclk, take all mode config affect, so the clk would run in
@@ -903,13 +957,14 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
reset_control_deassert(vop->dclk_rst);
clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
- ret = clk_enable(vop->dclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
- return ret;
+out:
+ ret_clk = clk_enable(vop->dclk);
+ if (ret_clk < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret_clk);
+ return ret_clk;
}
- return 0;
+ return ret;
}
static void vop_crtc_commit(struct drm_crtc *crtc)
@@ -934,9 +989,9 @@ static int vop_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb = crtc->primary->fb;
int ret;
- /* when the page flip is requested, crtc's dpms should be on */
- if (vop->dpms > DRM_MODE_DPMS_ON) {
- DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
+ /* when the page flip is requested, crtc should be on */
+ if (!vop->is_enabled) {
+ DRM_DEBUG("page flip request rejected because crtc is off.\n");
return 0;
}
@@ -1081,6 +1136,7 @@ static irqreturn_t vop_isr(int irq, void *data)
struct vop *vop = data;
uint32_t intr0_reg, active_irqs;
unsigned long flags;
+ int ret = IRQ_NONE;
/*
* INTR_CTRL0 register has interrupt status, enable and clear bits, we
@@ -1099,15 +1155,23 @@ static irqreturn_t vop_isr(int irq, void *data)
if (!active_irqs)
return IRQ_NONE;
- /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
- if (!(active_irqs & FS_INTR)) {
- DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
- return IRQ_NONE;
+ if (active_irqs & DSP_HOLD_VALID_INTR) {
+ complete(&vop->dsp_hold_completion);
+ active_irqs &= ~DSP_HOLD_VALID_INTR;
+ ret = IRQ_HANDLED;
+ }
+
+ if (active_irqs & FS_INTR) {
+ drm_handle_vblank(vop->drm_dev, vop->pipe);
+ active_irqs &= ~FS_INTR;
+ ret = (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
- drm_handle_vblank(vop->drm_dev, vop->pipe);
+ /* Unhandled irqs are spurious. */
+ if (active_irqs)
+ DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
- return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+ return ret;
}
static int vop_create_crtc(struct vop *vop)
@@ -1189,6 +1253,7 @@ static int vop_create_crtc(struct vop *vop)
goto err_cleanup_crtc;
}
+ init_completion(&vop->dsp_hold_completion);
crtc->port = port;
vop->pipe = drm_crtc_index(crtc);
rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
@@ -1302,7 +1367,7 @@ static int vop_initial(struct vop *vop)
clk_disable(vop->hclk);
- vop->dpms = DRM_MODE_DPMS_OFF;
+ vop->is_enabled = false;
return 0;
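
The vop_disable() rework above replaces the old "standby probably finished" TODO with a real handshake: arm the DSP_HOLD_VALID interrupt, assert standby, sleep on a completion until the ISR confirms the frame has drained, and only then cut clocks and detach the iommu. The shape of that pattern, reduced to a sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/interrupt.h>

struct sketch_dev {
        struct completion done;         /* init_completion() at probe time */
};

/* Caller side: trigger the operation, then sleep until the ISR confirms. */
static void sketch_shutdown(struct sketch_dev *d)
{
        reinit_completion(&d->done);    /* re-arm before triggering */
        /* enable the "finished" interrupt and start the operation here */
        wait_for_completion(&d->done);
        /* only now is it safe to disable clocks or detach the iommu */
}

/* ISR side: signal the waiter. */
static irqreturn_t sketch_isr(int irq, void *data)
{
        struct sketch_dev *d = data;

        complete(&d->done);
        return IRQ_HANDLED;
}
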
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index e6f6ef7c4866..6b641c5a2ec7 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -9,6 +9,8 @@
#include <linux/clk.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
@@ -77,22 +79,18 @@ static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
}
static int
-sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb)
+sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- struct sti_layer *layer;
struct clk *clk;
int rate = mode->clock * 1000;
int res;
- unsigned int w, h;
- DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n",
+ DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
- crtc->primary->fb->base.id, mode->base.id, mode->name);
+ mode->base.id, mode->name);
DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
mode->vrefresh, mode->clock,
@@ -122,72 +120,13 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &crtc->mode);
- /* a GDP is reserved to the CRTC FB */
- layer = to_sti_layer(crtc->primary);
- if (!layer) {
- DRM_ERROR("Can not find GDP0)\n");
- return -EINVAL;
- }
-
- /* copy the mode data adjusted by mode_fixup() into crtc->mode
- * so that hardware can be set to proper mode
- */
- memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
-
- res = sti_mixer_set_layer_depth(mixer, layer);
- if (res) {
- DRM_ERROR("Can not set layer depth\n");
- return -EINVAL;
- }
res = sti_mixer_active_video_area(mixer, &crtc->mode);
if (res) {
DRM_ERROR("Can not set active video area\n");
return -EINVAL;
}
- w = crtc->primary->fb->width - x;
- h = crtc->primary->fb->height - y;
-
- return sti_layer_prepare(layer, crtc,
- crtc->primary->fb, &crtc->mode,
- mixer->id, 0, 0, w, h, x, y, w, h);
-}
-
-static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct sti_mixer *mixer = to_sti_mixer(crtc);
- struct sti_layer *layer;
- unsigned int w, h;
- int ret;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- crtc->primary->fb->base.id, x, y);
-
- /* GDP is reserved to the CRTC FB */
- layer = to_sti_layer(crtc->primary);
- if (!layer) {
- DRM_ERROR("Can not find GDP0)\n");
- ret = -EINVAL;
- goto out;
- }
-
- w = crtc->primary->fb->width - crtc->x;
- h = crtc->primary->fb->height - crtc->y;
-
- ret = sti_layer_prepare(layer, crtc,
- crtc->primary->fb, &crtc->mode,
- mixer->id, 0, 0, w, h,
- crtc->x, crtc->y, w, h);
- if (ret) {
- DRM_ERROR("Can not prepare layer\n");
- goto out;
- }
-
- sti_drm_crtc_commit(crtc);
-out:
- return ret;
+ return res;
}
static void sti_drm_crtc_disable(struct drm_crtc *crtc)
@@ -195,7 +134,6 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- struct sti_layer *layer;
if (!mixer->enabled)
return;
@@ -205,24 +143,6 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
/* Disable Background */
sti_mixer_set_background_status(mixer, false);
- /* Disable GDP */
- layer = to_sti_layer(crtc->primary);
- if (!layer) {
- DRM_ERROR("Cannot find GDP0\n");
- return;
- }
-
- /* Disable layer at mixer level */
- if (sti_mixer_set_layer_status(mixer, layer, false))
- DRM_ERROR("Can not disable %s layer at mixer\n",
- sti_layer_to_str(layer));
-
- /* Wait a while to be sure that a Vsync event is received */
- msleep(WAIT_NEXT_VSYNC_MS);
-
- /* Then disable layer itself */
- sti_layer_disable(layer);
-
drm_crtc_vblank_off(crtc);
/* Disable pixel clock and compo IP clocks */
@@ -237,64 +157,44 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
mixer->enabled = false;
}
-static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
- .dpms = sti_drm_crtc_dpms,
- .prepare = sti_drm_crtc_prepare,
- .commit = sti_drm_crtc_commit,
- .mode_fixup = sti_drm_crtc_mode_fixup,
- .mode_set = sti_drm_crtc_mode_set,
- .mode_set_base = sti_drm_crtc_mode_set_base,
- .disable = sti_drm_crtc_disable,
-};
+static void
+sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ sti_drm_crtc_prepare(crtc);
+ sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+}
-static int sti_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static void sti_drm_atomic_begin(struct drm_crtc *crtc)
{
- struct drm_device *drm_dev = crtc->dev;
- struct drm_framebuffer *old_fb;
struct sti_mixer *mixer = to_sti_mixer(crtc);
- unsigned long flags;
- int ret;
- DRM_DEBUG_KMS("fb %d --> fb %d\n",
- crtc->primary->fb->base.id, fb->base.id);
+ if (crtc->state->event) {
+ crtc->state->event->pipe = drm_crtc_index(crtc);
- mutex_lock(&drm_dev->struct_mutex);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- old_fb = crtc->primary->fb;
- crtc->primary->fb = fb;
- ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
- if (ret) {
- DRM_ERROR("failed\n");
- crtc->primary->fb = old_fb;
- goto out;
+ mixer->pending_event = crtc->state->event;
+ crtc->state->event = NULL;
}
+}
- if (event) {
- event->pipe = mixer->id;
-
- ret = drm_vblank_get(drm_dev, event->pipe);
- if (ret) {
- DRM_ERROR("Cannot get vblank\n");
- goto out;
- }
-
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- if (mixer->pending_event) {
- drm_vblank_put(drm_dev, event->pipe);
- ret = -EBUSY;
- } else {
- mixer->pending_event = event;
- }
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
- }
-out:
- mutex_unlock(&drm_dev->struct_mutex);
- return ret;
+static void sti_drm_atomic_flush(struct drm_crtc *crtc)
+{
}
+static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
+ .dpms = sti_drm_crtc_dpms,
+ .prepare = sti_drm_crtc_prepare,
+ .commit = sti_drm_crtc_commit,
+ .mode_fixup = sti_drm_crtc_mode_fixup,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_nofb = sti_drm_crtc_mode_set_nofb,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
+ .disable = sti_drm_crtc_disable,
+ .atomic_begin = sti_drm_atomic_begin,
+ .atomic_flush = sti_drm_atomic_flush,
+};
+
static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
{
DRM_DEBUG_KMS("\n");
@@ -380,10 +280,13 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
static struct drm_crtc_funcs sti_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
- .page_flip = sti_drm_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
.destroy = sti_drm_crtc_destroy,
.set_property = sti_drm_crtc_set_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
index 5239fa121726..59d558b400b3 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -28,8 +30,87 @@
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
+static void sti_drm_atomic_schedule(struct sti_drm_private *private,
+ struct drm_atomic_state *state)
+{
+ private->commit.state = state;
+ schedule_work(&private->commit.work);
+}
+
+static void sti_drm_atomic_complete(struct sti_drm_private *private,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = private->drm_dev;
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: It must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ drm_atomic_helper_commit_modeset_disables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
+
+ drm_atomic_helper_wait_for_vblanks(drm, state);
+
+ drm_atomic_helper_cleanup_planes(drm, state);
+ drm_atomic_state_free(state);
+}
+
+static void sti_drm_atomic_work(struct work_struct *work)
+{
+ struct sti_drm_private *private = container_of(work,
+ struct sti_drm_private, commit.work);
+
+ sti_drm_atomic_complete(private, private->commit.state);
+}
+
+static int sti_drm_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state, bool async)
+{
+ struct sti_drm_private *private = drm->dev_private;
+ int err;
+
+ err = drm_atomic_helper_prepare_planes(drm, state);
+ if (err)
+ return err;
+
+ /* serialize outstanding asynchronous commits */
+ mutex_lock(&private->commit.lock);
+ flush_work(&private->commit.work);
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(drm, state);
+
+ if (async)
+ sti_drm_atomic_schedule(private, state);
+ else
+ sti_drm_atomic_complete(private, state);
+
+ mutex_unlock(&private->commit.lock);
+ return 0;
+}
+
static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = sti_drm_atomic_commit,
};
static void sti_drm_mode_config_init(struct drm_device *dev)
@@ -61,6 +142,9 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)private;
private->drm_dev = dev;
+ mutex_init(&private->commit.lock);
+ INIT_WORK(&private->commit.work, sti_drm_atomic_work);
+
drm_mode_config_init(dev);
drm_kms_helper_poll_init(dev);
@@ -74,7 +158,7 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
return ret;
}
- drm_helper_disable_unused_functions(dev);
+ drm_mode_config_reset(dev);
#ifdef CONFIG_DRM_STI_FBDEV
drm_fbdev_cma_init(dev, 32,
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
index ec5e2eb8dff9..c413aa3ff402 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ b/drivers/gpu/drm/sti/sti_drm_drv.h
@@ -24,6 +24,12 @@ struct sti_drm_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
+
+ struct {
+ struct drm_atomic_state *state;
+ struct work_struct work;
+ struct mutex lock;
+ } commit;
};
#endif
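
The commit.{state,work,lock} triplet above backs sti_drm_atomic_commit(): prepare the planes, take the lock, flush any still-running worker, swap in the new state, then either retire it inline or hand it to the worker. A sketch of that serialization scheme, stripped of the DRM specifics and using hypothetical names:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct sketch_commit {
        struct mutex lock;              /* serializes commits */
        struct work_struct work;        /* INIT_WORK(..., sketch_work_fn) at load */
        void *state;                    /* state consumed by the worker */
};

static void sketch_complete(struct sketch_commit *c)
{
        /* apply c->state to the hardware, wait for vblanks, free it */
}

static void sketch_work_fn(struct work_struct *work)
{
        struct sketch_commit *c = container_of(work, struct sketch_commit, work);

        sketch_complete(c);
}

static int sketch_commit(struct sketch_commit *c, void *state, bool async)
{
        mutex_lock(&c->lock);
        flush_work(&c->work);           /* previous async commit must retire */

        c->state = state;               /* point of no return */
        if (async)
                schedule_work(&c->work);        /* retire off the ioctl path */
        else
                sketch_complete(c);             /* retire inline */

        mutex_unlock(&c->lock);
        return 0;
}
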
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
index bb6a29339e10..64d4ed43dda3 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -6,6 +6,10 @@
* License terms: GNU General Public License (GPL), version 2
*/
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
#include "sti_compositor.h"
#include "sti_drm_drv.h"
#include "sti_drm_plane.h"
@@ -33,9 +37,9 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct sti_mixer *mixer = to_sti_mixer(crtc);
int res;
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n",
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
- plane->base.id, sti_layer_to_str(layer), fb->base.id);
+ plane->base.id, sti_layer_to_str(layer));
DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
res = sti_mixer_set_layer_depth(mixer, layer);
@@ -110,7 +114,7 @@ static void sti_drm_plane_destroy(struct drm_plane *plane)
{
DRM_DEBUG_DRIVER("\n");
- sti_drm_disable_plane(plane);
+ drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
}
@@ -133,10 +137,58 @@ static int sti_drm_plane_set_property(struct drm_plane *plane,
}
static struct drm_plane_funcs sti_drm_plane_funcs = {
- .update_plane = sti_drm_update_plane,
- .disable_plane = sti_drm_disable_plane,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_drm_plane_destroy,
.set_property = sti_drm_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
+{
+ return 0;
+}
+
+static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_fb)
+{
+}
+
+static int sti_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void sti_drm_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = plane->state;
+
+ sti_drm_update_plane(plane, state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+}
+
+static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *oldstate)
+{
+ sti_drm_disable_plane(plane);
+}
+
+static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
+ .prepare_fb = sti_drm_plane_prepare_fb,
+ .cleanup_fb = sti_drm_plane_cleanup_fb,
+ .atomic_check = sti_drm_plane_atomic_check,
+ .atomic_update = sti_drm_plane_atomic_update,
+ .atomic_disable = sti_drm_plane_atomic_disable,
};
static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
@@ -178,11 +230,13 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
return NULL;
}
+ drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
+
for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
if (sti_layer_default_zorder[i] == layer->desc)
break;
- default_zorder = i;
+ default_zorder = i + 1;
if (type == DRM_PLANE_TYPE_OVERLAY)
sti_drm_plane_attach_zorder_property(&layer->plane,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index aeb5070c8363..a9b678af85a6 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_panel.h>
@@ -364,10 +365,13 @@ static void sti_dvo_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs sti_dvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_dvo_connector_detect,
.destroy = sti_dvo_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index a9bbb081ecad..598cd78b0b16 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
/* HDformatter registers */
@@ -611,10 +612,13 @@ static void sti_hda_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs sti_hda_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hda_connector_detect,
.destroy = sti_hda_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 1485ade98710..ae5424bd6b4c 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -13,6 +13,7 @@
#include <linux/reset.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -663,10 +664,13 @@ static void sti_hdmi_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs sti_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
.destroy = sti_hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 1a52522f5da7..a287e4fec865 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -425,8 +425,8 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane_state *state;
- if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane, plane->state);
kfree(plane->state);
plane->state = NULL;
@@ -443,12 +443,14 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla
struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
struct tegra_plane_state *copy;
- copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
- if (copy->base.fb)
- drm_framebuffer_reference(copy->base.fb);
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+ copy->tiling = state->tiling;
+ copy->format = state->format;
+ copy->swap = state->swap;
return &copy->base;
}
@@ -456,9 +458,7 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- if (state->fb)
- drm_framebuffer_unreference(state->fb);
-
+ __drm_atomic_helper_plane_destroy_state(plane, state);
kfree(state);
}
@@ -472,13 +472,15 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = {
};
static int tegra_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
return 0;
}
static void tegra_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_fb)
{
}
@@ -906,6 +908,15 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
return 0;
}
+u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc)
+{
+ if (dc->syncpt)
+ return host1x_syncpt_read(dc->syncpt);
+
+ /* fallback to software emulated VBLANK counter */
+ return drm_crtc_vblank_count(&dc->base);
+}
+
void tegra_dc_enable_vblank(struct tegra_dc *dc)
{
unsigned long value, flags;
@@ -993,6 +1004,9 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
{
struct tegra_dc_state *state;
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc, crtc->state);
+
kfree(crtc->state);
crtc->state = NULL;
@@ -1009,14 +1023,15 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc_state *copy;
- copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
- copy->base.mode_changed = false;
- copy->base.active_changed = false;
- copy->base.planes_changed = false;
- copy->base.event = NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
+ copy->clk = state->clk;
+ copy->pclk = state->pclk;
+ copy->div = state->div;
+ copy->planes = state->planes;
return &copy->base;
}
@@ -1024,6 +1039,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ __drm_atomic_helper_crtc_destroy_state(crtc, state);
kfree(state);
}
@@ -1150,26 +1166,18 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
return 0;
}
-int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
- unsigned long pclk, unsigned int div)
-{
- u32 value;
- int err;
-
- err = clk_set_parent(dc->clk, parent);
- if (err < 0) {
- dev_err(dc->dev, "failed to set parent clock: %d\n", err);
- return err;
- }
-
- DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), div);
-
- value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
- tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
-
- return 0;
-}
-
+/**
+ * tegra_dc_state_setup_clock - check clock settings and store them in atomic
+ * state
+ * @dc: display controller
+ * @crtc_state: CRTC atomic state
+ * @clk: parent clock for display controller
+ * @pclk: pixel clock
+ * @div: shift clock divider
+ *
+ * Returns:
+ * 0 on success or a negative error-code on failure.
+ */
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
@@ -1177,6 +1185,9 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
{
struct tegra_dc_state *state = to_dc_state(crtc_state);
+ if (!clk_has_parent(dc->clk, clk))
+ return -EINVAL;
+
state->clk = clk;
state->pclk = pclk;
state->div = div;
@@ -1292,9 +1303,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = tegra_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
.atomic_check = tegra_crtc_atomic_check,
@@ -1629,7 +1638,6 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- unsigned int syncpt;
u32 value;
int err;
@@ -1698,13 +1706,15 @@ static int tegra_dc_init(struct host1x_client *client)
}
/* initialize display controller */
- if (dc->pipe)
- syncpt = SYNCPT_VBLANK1;
- else
- syncpt = SYNCPT_VBLANK0;
+ if (dc->syncpt) {
+ u32 syncpt = host1x_syncpt_id(dc->syncpt);
- tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
- tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+ value = SYNCPT_CNTRL_NO_STALL;
+ tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+ value = SYNCPT_VSYNC_ENABLE | syncpt;
+ tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+ }
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
@@ -1872,6 +1882,7 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
const struct of_device_id *id;
struct resource *regs;
struct tegra_dc *dc;
@@ -1963,6 +1974,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
+ dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+ if (!dc->syncpt)
+ dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
platform_set_drvdata(pdev, dc);
return 0;
@@ -1973,6 +1988,8 @@ static int tegra_dc_remove(struct platform_device *pdev)
struct tegra_dc *dc = platform_get_drvdata(pdev);
int err;
+ host1x_syncpt_free(dc->syncpt);
+
err = host1x_client_unregister(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 705c93b00794..55792daabbb5 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -12,6 +12,8 @@
#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define SYNCPT_CNTRL_NO_STALL (1 << 8)
+#define SYNCPT_CNTRL_SOFT_RESET (1 << 0)
#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
@@ -23,6 +25,7 @@
#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define SYNCPT_VSYNC_ENABLE (1 << 8)
#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
#define DC_CMD_DISPLAY_COMMAND 0x032
#define DISP_CTRL_MODE_STOP (0 << 5)
@@ -438,8 +441,4 @@
#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
-/* synchronization points */
-#define SYNCPT_VBLANK0 26
-#define SYNCPT_VBLANK1 27
-
#endif /* TEGRA_DC_H */
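
The tegra dc.c changes above stop kmemdup()ing subclassed states and instead call __drm_atomic_helper_plane_duplicate_state() (and its CRTC counterpart), which copies the base struct, takes the framebuffer reference, and clears per-commit fields, leaving the driver to copy only its own members by hand. A minimal sketch of a duplicate hook under that scheme, for a hypothetical driver:

#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>

struct sketch_plane_state {
        struct drm_plane_state base;
        u32 format;                     /* driver-private member */
};

static struct drm_plane_state *
sketch_plane_duplicate_state(struct drm_plane *plane)
{
        struct sketch_plane_state *state =
                container_of(plane->state, struct sketch_plane_state, base);
        struct sketch_plane_state *copy;

        copy = kmalloc(sizeof(*copy), GFP_KERNEL);
        if (!copy)
                return NULL;

        /* copies the base state, grabs the fb reference and resets
         * per-commit fields such as the pending event */
        __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
        copy->format = state->format;   /* driver members copied by hand */

        return &copy->base;
}
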
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 7dd328d77996..1833abd7d3aa 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -55,9 +55,9 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
* current layout.
*/
- drm_atomic_helper_commit_pre_planes(drm, state);
+ drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_planes(drm, state);
- drm_atomic_helper_commit_post_planes(drm, state);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -172,6 +172,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
*/
drm->irq_enabled = true;
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->vblank_disable_immediate = true;
+ drm->max_vblank_count = 0xffffffff;
+
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto device;
@@ -813,12 +817,12 @@ static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
if (!crtc)
return 0;
- /* TODO: implement real hardware counter using syncpoints */
- return drm_crtc_vblank_count(crtc);
+ return tegra_dc_get_vblank_counter(dc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
@@ -879,8 +883,18 @@ static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
return 0;
}
+static int tegra_debugfs_iova(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct tegra_drm *tegra = drm->dev_private;
+
+ return drm_mm_dump_table(s, &tegra->mm);
+}
+
static struct drm_info_list tegra_debugfs_list[] = {
{ "framebuffers", tegra_debugfs_framebuffers, 0 },
+ { "iova", tegra_debugfs_iova, 0 },
};
static int tegra_debugfs_init(struct drm_minor *minor)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 8cb2dfeaa957..659b2fcc986d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -106,6 +106,7 @@ struct tegra_output;
struct tegra_dc {
struct host1x_client client;
+ struct host1x_syncpt *syncpt;
struct device *dev;
spinlock_t lock;
@@ -180,12 +181,11 @@ struct tegra_dc_window {
};
/* from dc.c */
+u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc);
void tegra_dc_enable_vblank(struct tegra_dc *dc);
void tegra_dc_disable_vblank(struct tegra_dc *dc);
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
void tegra_dc_commit(struct tegra_dc *dc);
-int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
- unsigned long pclk, unsigned int div);
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7eaaee74a039..06ab1783bba1 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -952,7 +952,7 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
}
tegra_hdmi_writel(hdmi,
- SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC(0) |
SOR_SEQ_PU_PC_ALT(0) |
SOR_SEQ_PD_PC(8) |
SOR_SEQ_PD_PC_ALT(8),
@@ -1394,8 +1394,8 @@ static int tegra_hdmi_exit(struct host1x_client *client)
tegra_output_exit(&hdmi->output);
- clk_disable_unprepare(hdmi->clk);
reset_control_assert(hdmi->rst);
+ clk_disable_unprepare(hdmi->clk);
regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 919a19df4e1b..a882514389cd 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -201,7 +201,7 @@
#define HDMI_NV_PDISP_SOR_CRCB 0x5d
#define HDMI_NV_PDISP_SOR_BLANK 0x5e
#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
-#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC(x) (((x) & 0xf) << 0)
#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 2afe478ded3b..7591d8901f9a 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -41,6 +41,8 @@ struct tegra_sor {
struct mutex lock;
bool enabled;
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
struct dentry *debugfs;
};
@@ -68,13 +70,12 @@ static inline struct tegra_sor *to_sor(struct tegra_output *output)
return container_of(output, struct tegra_sor, output);
}
-static inline unsigned long tegra_sor_readl(struct tegra_sor *sor,
- unsigned long offset)
+static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned long offset)
{
return readl(sor->regs + (offset << 2));
}
-static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
+static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
unsigned long offset)
{
writel(value, sor->regs + (offset << 2));
@@ -83,9 +84,9 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
struct drm_dp_link *link)
{
- unsigned long value;
unsigned int i;
u8 pattern;
+ u32 value;
int err;
/* setup lane parameters */
@@ -202,7 +203,7 @@ static void tegra_sor_update(struct tegra_sor *sor)
static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
value = tegra_sor_readl(sor, SOR_PWM_DIV);
value &= ~SOR_PWM_DIV_MASK;
@@ -281,7 +282,7 @@ static int tegra_sor_wakeup(struct tegra_sor *sor)
static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
value = tegra_sor_readl(sor, SOR_PWR);
value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU;
@@ -674,38 +675,195 @@ static const struct file_operations tegra_sor_crc_fops = {
.release = tegra_sor_crc_release,
};
+static int tegra_sor_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_sor *sor = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-38s %#05x %08x\n", #name, name, \
+ tegra_sor_readl(sor, name))
+
+ DUMP_REG(SOR_CTXSW);
+ DUMP_REG(SOR_SUPER_STATE_0);
+ DUMP_REG(SOR_SUPER_STATE_1);
+ DUMP_REG(SOR_STATE_0);
+ DUMP_REG(SOR_STATE_1);
+ DUMP_REG(SOR_HEAD_STATE_0(0));
+ DUMP_REG(SOR_HEAD_STATE_0(1));
+ DUMP_REG(SOR_HEAD_STATE_1(0));
+ DUMP_REG(SOR_HEAD_STATE_1(1));
+ DUMP_REG(SOR_HEAD_STATE_2(0));
+ DUMP_REG(SOR_HEAD_STATE_2(1));
+ DUMP_REG(SOR_HEAD_STATE_3(0));
+ DUMP_REG(SOR_HEAD_STATE_3(1));
+ DUMP_REG(SOR_HEAD_STATE_4(0));
+ DUMP_REG(SOR_HEAD_STATE_4(1));
+ DUMP_REG(SOR_HEAD_STATE_5(0));
+ DUMP_REG(SOR_HEAD_STATE_5(1));
+ DUMP_REG(SOR_CRC_CNTRL);
+ DUMP_REG(SOR_DP_DEBUG_MVID);
+ DUMP_REG(SOR_CLK_CNTRL);
+ DUMP_REG(SOR_CAP);
+ DUMP_REG(SOR_PWR);
+ DUMP_REG(SOR_TEST);
+ DUMP_REG(SOR_PLL_0);
+ DUMP_REG(SOR_PLL_1);
+ DUMP_REG(SOR_PLL_2);
+ DUMP_REG(SOR_PLL_3);
+ DUMP_REG(SOR_CSTM);
+ DUMP_REG(SOR_LVDS);
+ DUMP_REG(SOR_CRC_A);
+ DUMP_REG(SOR_CRC_B);
+ DUMP_REG(SOR_BLANK);
+ DUMP_REG(SOR_SEQ_CTL);
+ DUMP_REG(SOR_LANE_SEQ_CTL);
+ DUMP_REG(SOR_SEQ_INST(0));
+ DUMP_REG(SOR_SEQ_INST(1));
+ DUMP_REG(SOR_SEQ_INST(2));
+ DUMP_REG(SOR_SEQ_INST(3));
+ DUMP_REG(SOR_SEQ_INST(4));
+ DUMP_REG(SOR_SEQ_INST(5));
+ DUMP_REG(SOR_SEQ_INST(6));
+ DUMP_REG(SOR_SEQ_INST(7));
+ DUMP_REG(SOR_SEQ_INST(8));
+ DUMP_REG(SOR_SEQ_INST(9));
+ DUMP_REG(SOR_SEQ_INST(10));
+ DUMP_REG(SOR_SEQ_INST(11));
+ DUMP_REG(SOR_SEQ_INST(12));
+ DUMP_REG(SOR_SEQ_INST(13));
+ DUMP_REG(SOR_SEQ_INST(14));
+ DUMP_REG(SOR_SEQ_INST(15));
+ DUMP_REG(SOR_PWM_DIV);
+ DUMP_REG(SOR_PWM_CTL);
+ DUMP_REG(SOR_VCRC_A_0);
+ DUMP_REG(SOR_VCRC_A_1);
+ DUMP_REG(SOR_VCRC_B_0);
+ DUMP_REG(SOR_VCRC_B_1);
+ DUMP_REG(SOR_CCRC_A_0);
+ DUMP_REG(SOR_CCRC_A_1);
+ DUMP_REG(SOR_CCRC_B_0);
+ DUMP_REG(SOR_CCRC_B_1);
+ DUMP_REG(SOR_EDATA_A_0);
+ DUMP_REG(SOR_EDATA_A_1);
+ DUMP_REG(SOR_EDATA_B_0);
+ DUMP_REG(SOR_EDATA_B_1);
+ DUMP_REG(SOR_COUNT_A_0);
+ DUMP_REG(SOR_COUNT_A_1);
+ DUMP_REG(SOR_COUNT_B_0);
+ DUMP_REG(SOR_COUNT_B_1);
+ DUMP_REG(SOR_DEBUG_A_0);
+ DUMP_REG(SOR_DEBUG_A_1);
+ DUMP_REG(SOR_DEBUG_B_0);
+ DUMP_REG(SOR_DEBUG_B_1);
+ DUMP_REG(SOR_TRIG);
+ DUMP_REG(SOR_MSCHECK);
+ DUMP_REG(SOR_XBAR_CTRL);
+ DUMP_REG(SOR_XBAR_POL);
+ DUMP_REG(SOR_DP_LINKCTL_0);
+ DUMP_REG(SOR_DP_LINKCTL_1);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
+ DUMP_REG(SOR_LANE_PREEMPHASIS_0);
+ DUMP_REG(SOR_LANE_PREEMPHASIS_1);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
+ DUMP_REG(SOR_LANE_POST_CURSOR_0);
+ DUMP_REG(SOR_LANE_POST_CURSOR_1);
+ DUMP_REG(SOR_DP_CONFIG_0);
+ DUMP_REG(SOR_DP_CONFIG_1);
+ DUMP_REG(SOR_DP_MN_0);
+ DUMP_REG(SOR_DP_MN_1);
+ DUMP_REG(SOR_DP_PADCTL_0);
+ DUMP_REG(SOR_DP_PADCTL_1);
+ DUMP_REG(SOR_DP_DEBUG_0);
+ DUMP_REG(SOR_DP_DEBUG_1);
+ DUMP_REG(SOR_DP_SPARE_0);
+ DUMP_REG(SOR_DP_SPARE_1);
+ DUMP_REG(SOR_DP_AUDIO_CTRL);
+ DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+ DUMP_REG(SOR_DP_TPG);
+ DUMP_REG(SOR_DP_TPG_CONFIG);
+ DUMP_REG(SOR_DP_LQ_CSTM_0);
+ DUMP_REG(SOR_DP_LQ_CSTM_1);
+ DUMP_REG(SOR_DP_LQ_CSTM_2);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_sor_show_regs, 0, NULL },
+};
+
static int tegra_sor_debugfs_init(struct tegra_sor *sor,
struct drm_minor *minor)
{
struct dentry *entry;
+ unsigned int i;
int err = 0;
sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
if (!sor->debugfs)
return -ENOMEM;
+ sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!sor->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ sor->debugfs_files[i].data = sor;
+
+ err = drm_debugfs_create_files(sor->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ sor->debugfs, minor);
+ if (err < 0)
+ goto free;
+
entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
&tegra_sor_crc_fops);
if (!entry) {
- dev_err(sor->dev,
- "cannot create /sys/kernel/debug/dri/%s/sor/crc\n",
- minor->debugfs_root->d_name.name);
err = -ENOMEM;
- goto remove;
+ goto free;
}
return err;
+free:
+ kfree(sor->debugfs_files);
+ sor->debugfs_files = NULL;
remove:
- debugfs_remove(sor->debugfs);
+ debugfs_remove_recursive(sor->debugfs);
sor->debugfs = NULL;
return err;
}
static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
{
- debugfs_remove_recursive(sor->debugfs);
+ drm_debugfs_remove_files(sor->debugfs_files, ARRAY_SIZE(debugfs_files),
+ sor->minor);
+ sor->minor = NULL;
+
+ kfree(sor->debugfs_files);
+ sor->debugfs_files = NULL;
+
+ debugfs_remove_recursive(sor->debugfs);
sor->debugfs = NULL;
}
static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
@@ -791,8 +949,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
struct tegra_sor_config config;
struct drm_dp_link link;
struct drm_dp_aux *aux;
- unsigned long value;
int err = 0;
+ u32 value;
mutex_lock(&sor->lock);
@@ -1354,12 +1512,30 @@ static int tegra_sor_init(struct host1x_client *client)
}
}
+ /*
+ * XXX: Remove this reset once proper hand-over from firmware to
+ * kernel is possible.
+ */
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
+ return err;
+ }
+
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
return err;
}
+ usleep_range(1000, 3000);
+
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
+ return err;
+ }
+
err = clk_prepare_enable(sor->clk_safe);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
new file mode 100644
index 000000000000..1055cb79096c
--- /dev/null
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+vgem-y := vgem_drv.o vgem_dma_buf.o
+
+obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c
new file mode 100644
index 000000000000..0254438ad1a6
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_dma_buf.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ * Copyright © 2014 The Chromium OS Authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include "vgem_drv.h"
+
+struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ BUG_ON(obj->pages == NULL);
+
+ return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE);
+}
+
+int vgem_gem_prime_pin(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ return vgem_gem_get_pages(obj);
+}
+
+void vgem_gem_prime_unpin(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ vgem_gem_put_pages(obj);
+}
+
+void *vgem_gem_prime_vmap(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ BUG_ON(obj->pages == NULL);
+
+ return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+}
+
+void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ vunmap(vaddr);
+}
+
+struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_vgem_gem_object *obj = NULL;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_gem_object_init(dev, &obj->base, dma_buf->size);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ get_dma_buf(dma_buf);
+
+ obj->base.dma_buf = dma_buf;
+ obj->use_dma_buf = true;
+
+ return &obj->base;
+
+fail_free:
+ kfree(obj);
+fail:
+ return ERR_PTR(ret);
+}
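The import path takes its own reference on the dma-buf via get_dma_buf(), so the backing storage outlives the exporter's handle; mmap of such an object is later routed to dma_buf_mmap() in vgem_drm_gem_mmap(). A hypothetical userspace round-trip using libdrm's PRIME helpers (share_with_vgem and its parameters are illustrative, not part of this patch):

        #include <unistd.h>
        #include <stdint.h>
        #include <xf86drm.h>

        /* Export a GEM handle from a render device as a dma-buf fd and
         * import it into vgem, yielding a vgem-local handle. */
        int share_with_vgem(int render_fd, uint32_t render_handle,
                            int vgem_fd, uint32_t *vgem_handle)
        {
                int prime_fd, err;

                err = drmPrimeHandleToFD(render_fd, render_handle,
                                         DRM_CLOEXEC, &prime_fd);
                if (err < 0)
                        return err;

                err = drmPrimeFDToHandle(vgem_fd, prime_fd, vgem_handle);
                close(prime_fd); /* the import holds its own reference */
                return err;
        }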
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
new file mode 100644
index 000000000000..cb3b43525b2d
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ * Copyright © 2014 The Chromium OS Authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Adam Jackson <ajax@redhat.com>
+ * Ben Widawsky <ben@bwidawsk.net>
+ */
+
+/*
+ * vgem is a virtual, non-hardware-backed GEM service, used by Mesa's
+ * software renderer and the X server for efficient buffer sharing.
+ */
+
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include "vgem_drv.h"
+
+#define DRIVER_NAME "vgem"
+#define DRIVER_DESC "Virtual GEM provider"
+#define DRIVER_DATE "20120112"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
+{
+ drm_gem_put_pages(&obj->base, obj->pages, false, false);
+ obj->pages = NULL;
+}
+
+static void vgem_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
+
+ drm_gem_free_mmap_offset(obj);
+
+ if (vgem_obj->use_dma_buf && obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+
+ drm_gem_object_release(obj);
+
+ if (vgem_obj->pages)
+ vgem_gem_put_pages(vgem_obj);
+
+ vgem_obj->pages = NULL;
+
+ kfree(vgem_obj);
+}
+
+int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
+{
+ struct page **pages;
+
+ if (obj->pages || obj->use_dma_buf)
+ return 0;
+
+ pages = drm_gem_get_pages(&obj->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ obj->pages = pages;
+
+ return 0;
+}
+
+static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_vgem_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->base.dev;
+ loff_t num_pages;
+ pgoff_t page_offset;
+ int ret;
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
+
+ if (page_offset >= num_pages)
+ return VM_FAULT_SIGBUS;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ obj->pages[page_offset]);
+
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EBUSY:
+ return VM_FAULT_RETRY;
+ case -EFAULT:
+ case -EINVAL:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ON(1);
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static struct vm_operations_struct vgem_gem_vm_ops = {
+ .fault = vgem_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+/* ioctls */
+
+static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ unsigned int *handle,
+ unsigned long size)
+{
+ struct drm_vgem_gem_object *obj;
+ struct drm_gem_object *gem_object;
+ int err;
+
+ size = roundup(size, PAGE_SIZE);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_object = &obj->base;
+
+ err = drm_gem_object_init(dev, gem_object, size);
+ if (err)
+ goto out;
+
+ err = drm_gem_handle_create(file, gem_object, handle);
+ if (err)
+ goto handle_out;
+
+ drm_gem_object_unreference_unlocked(gem_object);
+
+ return gem_object;
+
+handle_out:
+ drm_gem_object_release(gem_object);
+out:
+ kfree(obj);
+ return ERR_PTR(err);
+}
+
+static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gem_object;
+ uint64_t size;
+ uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+
+ size = args->height * pitch;
+ if (size == 0)
+ return -EINVAL;
+
+ gem_object = vgem_gem_create(dev, file, &args->handle, size);
+
+ if (IS_ERR(gem_object)) {
+ DRM_DEBUG_DRIVER("object creation failed\n");
+ return PTR_ERR(gem_object);
+ }
+
+ args->size = gem_object->size;
+ args->pitch = pitch;
+
+ DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+ return 0;
+}
+
+int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ int ret = 0;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (!drm_vma_node_has_offset(&obj->vma_node)) {
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto unref;
+ }
+
+ BUG_ON(!obj->filp);
+
+ obj->filp->private_data = obj;
+
+ ret = vgem_gem_get_pages(to_vgem_bo(obj));
+ if (ret)
+ goto fail_get_pages;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ goto unref;
+
+fail_get_pages:
+ drm_gem_free_mmap_offset(obj);
+unref:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
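Together, dumb_create and dumb_map implement the standard dumb-buffer flow: create a handle, ask for a fake mmap offset, then mmap() that offset on the DRM fd; the pages themselves are only allocated on first touch by vgem_gem_fault(). A hypothetical userspace sketch (map_vgem_buffer is illustrative):

        #include <stdint.h>
        #include <sys/mman.h>
        #include <xf86drm.h>

        /* Create a 640x480 32bpp dumb buffer on vgem and map it. */
        void *map_vgem_buffer(int fd, uint32_t *handle, uint32_t *pitch)
        {
                struct drm_mode_create_dumb create = {
                        .width = 640, .height = 480, .bpp = 32,
                };
                struct drm_mode_map_dumb map = {};

                if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                        return MAP_FAILED;

                map.handle = create.handle;
                if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                        return MAP_FAILED;

                *handle = create.handle;
                *pitch = create.pitch;

                /* pages are faulted in lazily by vgem_gem_fault() */
                return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, map.offset);
        }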
+
+int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_vma_offset_node *node;
+ struct drm_gem_object *obj;
+ struct drm_vgem_gem_object *vgem_obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+
+ vgem_obj = to_vgem_bo(obj);
+
+ if (obj->dma_buf && vgem_obj->use_dma_buf) {
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+ goto out_unlock;
+ }
+
+ if (!obj->dev->driver->gem_vm_ops) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = obj->dev->driver->gem_vm_ops;
+ vma->vm_private_data = vgem_obj;
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_gem_vm_open(vma);
+ return ret;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static struct drm_ioctl_desc vgem_ioctls[] = {
+};
+
+static const struct file_operations vgem_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = vgem_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+};
+
+static struct drm_driver vgem_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .gem_free_object = vgem_gem_free_object,
+ .gem_vm_ops = &vgem_gem_vm_ops,
+ .ioctls = vgem_ioctls,
+ .fops = &vgem_driver_fops,
+ .dumb_create = vgem_gem_dumb_create,
+ .dumb_map_offset = vgem_gem_dumb_map,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = vgem_gem_prime_import,
+ .gem_prime_pin = vgem_gem_prime_pin,
+ .gem_prime_unpin = vgem_gem_prime_unpin,
+ .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table,
+ .gem_prime_vmap = vgem_gem_prime_vmap,
+ .gem_prime_vunmap = vgem_gem_prime_vunmap,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+struct drm_device *vgem_device;
+
+static int __init vgem_init(void)
+{
+ int ret;
+
+ vgem_device = drm_dev_alloc(&vgem_driver, NULL);
+ if (!vgem_device) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_dev_register(vgem_device, 0);
+
+ if (ret)
+ goto out_unref;
+
+ return 0;
+
+out_unref:
+ drm_dev_unref(vgem_device);
+out:
+ return ret;
+}
+
+static void __exit vgem_exit(void)
+{
+ drm_dev_unregister(vgem_device);
+ drm_dev_unref(vgem_device);
+}
+
+module_init(vgem_init);
+module_exit(vgem_exit);
+
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 886779030f1a..57ab4d8f41f9 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -1,5 +1,6 @@
/*
- * Copyright © 2013 Intel Corporation
+ * Copyright © 2012 Intel Corporation
+ * Copyright © 2014 The Chromium OS Authors
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -17,23 +18,40 @@
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
*
- * Author: Jani Nikula <jani.nikula@intel.com>
*/
-#ifndef _INTEL_DSI_DSI_H
-#define _INTEL_DSI_DSI_H
+#ifndef _VGEM_DRV_H_
+#define _VGEM_DRV_H_
#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_dsi.h"
+#include <drm/drm_gem.h>
+
+#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
+struct drm_vgem_gem_object {
+ struct drm_gem_object base;
+ struct page **pages;
+ bool use_dma_buf;
+};
+
+/* vgem_drv.c */
+extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
+extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
+
+/* vgem_dma_buf.c */
+extern struct sg_table *vgem_gem_prime_get_sg_table(
+ struct drm_gem_object *gobj);
+extern int vgem_gem_prime_pin(struct drm_gem_object *gobj);
+extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj);
+extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj);
+extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
- enum port port);
-#endif /* _INTEL_DSI_DSI_H */
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b9cbc304e..620bb5cf617c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -134,7 +134,7 @@
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
/**
* Ioctl definitions.
@@ -1044,7 +1044,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd_drv != cmd)) {
+ if (unlikely(ioctl->cmd != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index b10550ee1d89..6b7fdc1e2ed0 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -425,6 +425,12 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
+u32 host1x_syncpt_read(struct host1x_syncpt *sp)
+{
+ return host1x_syncpt_load(sp);
+}
+EXPORT_SYMBOL(host1x_syncpt_read);
+
int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
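Unlike host1x_syncpt_read_min(), which returns the last value cached on the CPU side, the new host1x_syncpt_read() forces a hardware read through host1x_syncpt_load(). The distinction matters for the vblank counter above, since the syncpoint advances autonomously in hardware; an illustrative helper (host1x_syncpt_delta is not part of this patch):

        /* How far the hardware has advanced past the last cached value. */
        static u32 host1x_syncpt_delta(struct host1x_syncpt *sp)
        {
                u32 cached = host1x_syncpt_read_min(sp); /* cached copy */
                u32 fresh = host1x_syncpt_read(sp);      /* hardware read */

                return fresh - cached;
        }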
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 4864f8300797..9ef2e1f54ca4 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -147,20 +147,20 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
writel(reg2, priv->dc_tmpl_reg + word * 8 + 4);
}
-static int ipu_pixfmt_to_map(u32 fmt)
+static int ipu_bus_format_to_map(u32 fmt)
{
switch (fmt) {
- case V4L2_PIX_FMT_RGB24:
+ case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
- case V4L2_PIX_FMT_RGB565:
+ case MEDIA_BUS_FMT_RGB565_1X16:
return IPU_DC_MAP_RGB565;
- case IPU_PIX_FMT_GBR24:
+ case MEDIA_BUS_FMT_GBR888_1X24:
return IPU_DC_MAP_GBR24;
- case V4L2_PIX_FMT_BGR666:
+ case MEDIA_BUS_FMT_RGB666_1X18:
return IPU_DC_MAP_BGR666;
- case v4l2_fourcc('L', 'V', 'D', '6'):
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
return IPU_DC_MAP_LVDS666;
- case V4L2_PIX_FMT_BGR24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
return IPU_DC_MAP_BGR24;
default:
return -EINVAL;
@@ -168,7 +168,7 @@ static int ipu_pixfmt_to_map(u32 fmt)
}
int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
- u32 pixel_fmt, u32 width)
+ u32 bus_format, u32 width)
{
struct ipu_dc_priv *priv = dc->priv;
u32 reg = 0;
@@ -176,7 +176,7 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
dc->di = ipu_di_get_num(di);
- map = ipu_pixfmt_to_map(pixel_fmt);
+ map = ipu_bus_format_to_map(bus_format);
if (map < 0) {
dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
return map;
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 3ddfb3d0b64d..2970c6bb668c 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -441,8 +441,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
@@ -459,8 +458,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
clkrate = clk_get_rate(di->clk_ipu);
div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
rate = clkrate / div;
error = rate / (sig->mode.pixelclock / 1000);
@@ -483,8 +481,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
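Replacing the bare zero-check with clamp() also bounds the divider from above; the saturation at 255 suggests the DI clock generator field is 8 bits wide (it is shifted into clkgen0 at bit 4). A worked example with hypothetical rates:

        /* e.g. a 264 MHz source clock driving a 1 MHz pixel clock */
        static u32 di_clkgen0(u32 in_rate, u32 pixelclock)
        {
                u32 div = DIV_ROUND_CLOSEST(in_rate, pixelclock); /* 264 */

                /* saturates to 255; a div of 0 would be lifted to 1 */
                div = clamp(div, 1U, 255U);

                return div << 4;
        }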
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index ad75588e1629..1dcb96ccda66 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -297,8 +297,8 @@ static int calc_resize_coeffs(struct ipu_ic *ic,
return -EINVAL;
}
- /* Cannot downsize more than 8:1 */
- if ((out_size << 3) < in_size) {
+ /* Cannot downsize more than 4:1 */
+ if ((out_size << 2) < in_size) {
dev_err(ipu->dev, "Unsupported downsize\n");
return -EINVAL;
}
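The tightened check rejects any scaling where the input exceeds four times the output: for in_size = 1920, out_size = 480 (exactly 4:1) still passes, since 480 << 2 == 1920 is not less than in_size, while out_size = 400 (4.8:1) now fails; the old 8:1 check would have accepted both. As an illustrative predicate (ic_downsize_ok is not part of this patch):

        static bool ic_downsize_ok(u32 in_size, u32 out_size)
        {
                /* 1920 -> 480 (4:1) passes; 1920 -> 400 (4.8:1) fails */
                return (out_size << 2) >= in_size;
        }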